| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 245 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 4 to 245 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 4 to 245 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | lengths 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | lengths 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |

hexsha: 7906a9002d9b962b9ff94717c0dc4f5257c7ed23 | size: 1,378 | ext: py | lang: Python
max_stars_repo: script.py @ VeeraTamizhan/Unlimited-filter-bot-RJ (48db295558594424b4d0fe9ae8be4db5959d5abf), licenses ["MIT"], stars: null, events: null to null
max_issues_repo: script.py @ VeeraTamizhan/Unlimited-filter-bot-RJ (48db295558594424b4d0fe9ae8be4db5959d5abf), licenses ["MIT"], issues: null, events: null to null
max_forks_repo: script.py @ VeeraTamizhan/Unlimited-filter-bot-RJ (48db295558594424b4d0fe9ae8be4db5959d5abf), licenses ["MIT"], forks: 1, events: 2022-02-24T05:11:34.000Z to 2022-02-24T05:11:34.000Z
content:
class Script(object):
    START_MSG = """<b>Hello {}, how are you? 🌹
I'm an advanced filter bot with many capabilities!
Edit By @Yash_607
See <i>/help</i> for commands and more details.</b>
"""
HELP_MSG = """
<i>Add me as admin in your group and start filtering :)</i>
<b>Basic Commands;</b>
/start - Check if I'm alive!
/help - Command help
/about - Something about me!
<b>Filter Commands;</b>
<code>/add name reply</code> - Add filter for name
<code>/del name</code> - Delete filter
<code>/delall</code> - Delete entire filters (Group Owner Only!)
<code>/viewfilters</code> - List all filters in chat
<b>Connection Commands;</b>
<code>/connect groupid</code> - Connect your group to my PM. You can also simply use,
<code>/connect</code> in groups.
<code>/connections</code> - Manage your connections.
<b>Extras;</b>
/status - Shows current status of your bot (Auth User Only)
/id - Shows ID information
<code>/info userid</code> - Shows User Information. Use <code>/info</code> as reply to some message for their details!
<b>© @RJMALLU </b>
"""
    ABOUT_MSG = """⭕️<b>My Name :</b> <a href='http://t.me/Poli_ano_bot/'>UNLIMITED FILTER BOT RJ</a>
⭕️<b>Creator :</b> <a href='https://t.me/RJMALLU/'>RJ</a>
⭕️<b>Language :</b> <code>Python3</code>
⭕️<b>Library :</b> <a href='https://docs.pyrogram.org/'>Pyrogram 1.0.7</a>
"""
avg_line_length: 21.53125 | max_line_length: 120 | alphanum_fraction: 0.650943
content_no_comment:
class Script(object):
    START_MSG = """<b>Hello {}, how are you? 🌹
I'm an advanced filter bot with many capabilities!
Edit By @Yash_607
See <i>/help</i> for commands and more details.</b>
"""
HELP_MSG = """
<i>Add me as admin in your group and start filtering :)</i>
<b>Basic Commands;</b>
/start - Check if I'm alive!
/help - Command help
/about - Something about me!
<b>Filter Commands;</b>
<code>/add name reply</code> - Add filter for name
<code>/del name</code> - Delete filter
<code>/delall</code> - Delete entire filters (Group Owner Only!)
<code>/viewfilters</code> - List all filters in chat
<b>Connection Commands;</b>
<code>/connect groupid</code> - Connect your group to my PM. You can also simply use,
<code>/connect</code> in groups.
<code>/connections</code> - Manage your connections.
<b>Extras;</b>
/status - Shows current status of your bot (Auth User Only)
/id - Shows ID information
<code>/info userid</code> - Shows User Information. Use <code>/info</code> as reply to some message for their details!
<b>© @RJMALLU </b>
"""
    ABOUT_MSG = """⭕️<b>My Name :</b> <a href='http://t.me/Poli_ano_bot/'>UNLIMITED FILTER BOT RJ</a>
⭕️<b>Creator :</b> <a href='https://t.me/RJMALLU/'>RJ</a>
⭕️<b>Language :</b> <code>Python3</code>
⭕️<b>Library :</b> <a href='https://docs.pyrogram.org/'>Pyrogram 1.0.7</a>
"""
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7906ac20be35639ab45e657772265dd2cc118913 | size: 6,672 | ext: py | lang: Python
max_stars_repo: plotting/thumbnails_warm.py @ brberg/stokes-crevasse-advection (c5996d0330de5971381b4d0a9543c784b94a8918), licenses ["MIT"], stars: null, events: null to null
max_issues_repo: plotting/thumbnails_warm.py @ brberg/stokes-crevasse-advection (c5996d0330de5971381b4d0a9543c784b94a8918), licenses ["MIT"], issues: null, events: null to null
max_forks_repo: plotting/thumbnails_warm.py @ brberg/stokes-crevasse-advection (c5996d0330de5971381b4d0a9543c784b94a8918), licenses ["MIT"], forks: null, events: null to null
content:
from __future__ import division
import numpy as np
import sys
import os
import shutil
import vtk
from vtk.util.numpy_support import vtk_to_numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.animation as animation
import matplotlib.colors as mcolors
import argparse
import paraview.simple as parasim
import multiprocessing as mp
import copy
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'model')))
from geometry_generation import *
matplotlib.rcParams['font.size'] = 6
import scipy.interpolate as interpsci
import seaborn as sns
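# Build a new colormap that keeps only the [minval, maxval] slice of an existing colormap.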
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=-1):
if n == -1:
n = cmap.N
new_cmap = mcolors.LinearSegmentedColormap.from_list(
'trunc({name},{a:.2f},{b:.2f})'.format(name=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def makeImage(snapshots, geometryX, geometryY, data_names, folder_name, times):
fig = plt.figure(figsize=(7,4.72441/4*3))
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(234)
ax3 = fig.add_subplot(232)
ax4 = fig.add_subplot(235)
ax5 = fig.add_subplot(233)
ax6 = fig.add_subplot(236)
axes = [[ax1, ax2], [ax3, ax4], [ax5, ax6]]
all_axes = [ax1, ax2, ax3, ax4, ax5, ax6]
for k in range(len(snapshots[0])):
axes_current = axes[k]
values = [[],[]]
for j in range(len(data_names)):
i = snapshots[j][k]
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(data_names[j] + '{0:06d}.vtu'.format(i))
reader.Update()
data = reader.GetOutput()
points = data.GetPoints()
npts = points.GetNumberOfPoints()
x = vtk_to_numpy(points.GetData())[:, 0]
y = vtk_to_numpy(points.GetData())[:, 1]
f = vtk_to_numpy(data.GetPointData().GetArray(0))
triangles = vtk_to_numpy(data.GetCells().GetData())
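            # VTK stores each cell as [3, i0, i1, i2]; dropping every 4th entry (the vertex count)
            # leaves an (ntri, 3) triangle connectivity array for tricontourf.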
ntri = triangles.size//4
tri = np.take(triangles,[m for m in range(triangles.size) if m%4 != 0]).reshape(ntri, 3)
waterX = np.linspace(0, 60000, 100)
waterY = np.zeros(100)
values[j].append(x)
values[j].append(y)
values[j].append(tri)
values[j].append(f)
levels = np.linspace(0, 1.0, 100, endpoint=True)
cmap_new = truncate_colormap(plt.get_cmap("BuPu"), 0.25, 1.0)
maxL = 51100
bed_interpolator = interpsci.interp1d(geometryX, geometryY, fill_value='extrapolate')
geometryX = np.linspace(0, 60000, 1000)
geometryY = bed_interpolator(geometryX)
axes_current[0].fill_between(waterX, -200, 0, color='#94aec4ff', zorder=-21)
axes_current[1].fill_between(waterX, -200, 0, color='#94aec4ff', zorder=-21)
axes_current[0].fill_between(geometryX, -200, geometryY, color='#c69d6eff', zorder=-18)
axes_current[1].fill_between(geometryX, -200, geometryY, color='#c69d6eff', zorder=-18)
cnt1 = axes_current[0].tricontourf(values[0][0], values[0][1], values[0][2], values[0][3]*100, 100, cmap=cmap_new, levels=levels, extend='both', zorder=-20)
cnt2 = axes_current[1].tricontourf(values[1][0], values[1][1], values[1][2], values[1][3]*100, 100, cmap=cmap_new, levels=levels, extend='both', zorder=-20)
for cnt in [cnt1, cnt2]:
for c in cnt.collections:
c.set_edgecolor("face")
axes_current[0].set_title("t = %.1f years" % (times[k]-0.5))
print("Processed file number " + str(i) + ".")
labels = ['a', 'd', 'b', 'e', 'c', 'f']
for ax in all_axes:
ax.set_xlim([49400,maxL])
ax.set_ylim([-200,100])
ax.set_rasterization_zorder(-10)
for j in range(len(all_axes)):
all_axes[j].text(0.025, 0.97, labels[j], transform=all_axes[j].transAxes, va='top', fontsize=8, weight='bold')
for ax in [ax3, ax4, ax5, ax6]:
plt.sca(ax)
ylims = plt.yticks()
print(ylims)
locs = ylims[0][1:-1]
labels = []
for j in range(len(locs)):
labels.append('%.2f'%(locs[j]))
plt.sca(ax)
plt.yticks(locs, [" "]*len(locs))
for ax in [ax1, ax3, ax5]:
plt.sca(ax)
xlims = plt.xticks()
print(xlims)
locs = xlims[0][1:-1]
labels = []
for j in range(len(locs)):
labels.append('%.2f'%(locs[j]))
plt.sca(ax)
plt.xticks(locs, [" "]*len(locs))
for ax in [ax2, ax4, ax6]:
plt.sca(ax)
labelsx = [num/1000 for num in locs]
plt.xticks(locs, labelsx)
for ax in [ax2, ax4, ax6]:
ax.set_xlabel('Distance (km)')
for ax in [ax1, ax2]:
ax.set_ylabel('Height (m)')
ax1.text(-0.5, 0.5, 'No Advection', transform=ax1.transAxes, va='center', fontsize=12, rotation='vertical')
ax2.text(-0.5, 0.5, 'Advection', transform=ax2.transAxes, va='center', fontsize=12, rotation='vertical')
plt.tight_layout(pad=1.0,h_pad=-1.0,w_pad=0.0)
fig.savefig(folder_name + "/" + "thumbnails_warm.eps", transparent=False)
plt.close(fig)
if __name__ == "__main__":
sns.set(palette='colorblind')
sns.set(font_scale=0.8)
sns.set_style(style='ticks')
starting_directory = os.getcwd()
os.chdir(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'tests')))
main_directory = os.getcwd()
geometryX, geometryY, xz_boundary = make_geometry_grounded(-0.01, 50000, -150, 50, 100, 10)
times = [0.5, 8.0, 15.5]
directories = ['warm_noad', 'warm']
dataName = 'width_timeseries'
data_names = [os.path.join(directory, dataName) for directory in directories]
snapshots = [[], []]
for i in range(len(directories)):
for j in range(len(times)):
if times[j] == int(0):
snapshots[i].append(int(0))
else:
os.chdir(directories[i])
reader_paraview = parasim.PVDReader(FileName=dataName + '.pvd')
times_imported = reader_paraview.GetPropertyValue('TimestepValues')
times_temp = 0.0
for k in range(len(times_imported)):
if times_imported[k] >= times[j] and times_temp <= times[j]:
snapshots[i].append(int(k))
break
else:
times_temp = times_imported[k]
os.chdir(main_directory)
os.chdir(main_directory)
print(snapshots)
makeImage(snapshots, geometryX, geometryY, data_names, starting_directory, times)
avg_line_length: 37.066667 | max_line_length: 164 | alphanum_fraction: 0.601169
content_no_comment:
from __future__ import division
import numpy as np
import sys
import os
import shutil
import vtk
from vtk.util.numpy_support import vtk_to_numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.animation as animation
import matplotlib.colors as mcolors
import argparse
import paraview.simple as parasim
import multiprocessing as mp
import copy
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'model')))
from geometry_generation import *
matplotlib.rcParams['font.size'] = 6
import scipy.interpolate as interpsci
import seaborn as sns
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=-1):
if n == -1:
n = cmap.N
new_cmap = mcolors.LinearSegmentedColormap.from_list(
'trunc({name},{a:.2f},{b:.2f})'.format(name=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def makeImage(snapshots, geometryX, geometryY, data_names, folder_name, times):
fig = plt.figure(figsize=(7,4.72441/4*3))
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(234)
ax3 = fig.add_subplot(232)
ax4 = fig.add_subplot(235)
ax5 = fig.add_subplot(233)
ax6 = fig.add_subplot(236)
axes = [[ax1, ax2], [ax3, ax4], [ax5, ax6]]
all_axes = [ax1, ax2, ax3, ax4, ax5, ax6]
for k in range(len(snapshots[0])):
axes_current = axes[k]
values = [[],[]]
for j in range(len(data_names)):
i = snapshots[j][k]
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(data_names[j] + '{0:06d}.vtu'.format(i))
reader.Update()
data = reader.GetOutput()
points = data.GetPoints()
npts = points.GetNumberOfPoints()
x = vtk_to_numpy(points.GetData())[:, 0]
y = vtk_to_numpy(points.GetData())[:, 1]
f = vtk_to_numpy(data.GetPointData().GetArray(0))
triangles = vtk_to_numpy(data.GetCells().GetData())
ntri = triangles.size//4
tri = np.take(triangles,[m for m in range(triangles.size) if m%4 != 0]).reshape(ntri, 3)
waterX = np.linspace(0, 60000, 100)
waterY = np.zeros(100)
values[j].append(x)
values[j].append(y)
values[j].append(tri)
values[j].append(f)
levels = np.linspace(0, 1.0, 100, endpoint=True)
cmap_new = truncate_colormap(plt.get_cmap("BuPu"), 0.25, 1.0)
maxL = 51100
bed_interpolator = interpsci.interp1d(geometryX, geometryY, fill_value='extrapolate')
geometryX = np.linspace(0, 60000, 1000)
geometryY = bed_interpolator(geometryX)
axes_current[0].fill_between(waterX, -200, 0, color='#94aec4ff', zorder=-21)
axes_current[1].fill_between(waterX, -200, 0, color='#94aec4ff', zorder=-21)
axes_current[0].fill_between(geometryX, -200, geometryY, color='#c69d6eff', zorder=-18)
axes_current[1].fill_between(geometryX, -200, geometryY, color='#c69d6eff', zorder=-18)
cnt1 = axes_current[0].tricontourf(values[0][0], values[0][1], values[0][2], values[0][3]*100, 100, cmap=cmap_new, levels=levels, extend='both', zorder=-20)
cnt2 = axes_current[1].tricontourf(values[1][0], values[1][1], values[1][2], values[1][3]*100, 100, cmap=cmap_new, levels=levels, extend='both', zorder=-20)
for cnt in [cnt1, cnt2]:
for c in cnt.collections:
c.set_edgecolor("face")
axes_current[0].set_title("t = %.1f years" % (times[k]-0.5))
print("Processed file number " + str(i) + ".")
labels = ['a', 'd', 'b', 'e', 'c', 'f']
for ax in all_axes:
ax.set_xlim([49400,maxL])
ax.set_ylim([-200,100])
ax.set_rasterization_zorder(-10)
for j in range(len(all_axes)):
all_axes[j].text(0.025, 0.97, labels[j], transform=all_axes[j].transAxes, va='top', fontsize=8, weight='bold')
for ax in [ax3, ax4, ax5, ax6]:
plt.sca(ax)
ylims = plt.yticks()
print(ylims)
locs = ylims[0][1:-1]
labels = []
for j in range(len(locs)):
labels.append('%.2f'%(locs[j]))
plt.sca(ax)
plt.yticks(locs, [" "]*len(locs))
for ax in [ax1, ax3, ax5]:
plt.sca(ax)
xlims = plt.xticks()
print(xlims)
locs = xlims[0][1:-1]
labels = []
for j in range(len(locs)):
labels.append('%.2f'%(locs[j]))
plt.sca(ax)
plt.xticks(locs, [" "]*len(locs))
for ax in [ax2, ax4, ax6]:
plt.sca(ax)
labelsx = [num/1000 for num in locs]
plt.xticks(locs, labelsx)
for ax in [ax2, ax4, ax6]:
ax.set_xlabel('Distance (km)')
for ax in [ax1, ax2]:
ax.set_ylabel('Height (m)')
ax1.text(-0.5, 0.5, 'No Advection', transform=ax1.transAxes, va='center', fontsize=12, rotation='vertical')
ax2.text(-0.5, 0.5, 'Advection', transform=ax2.transAxes, va='center', fontsize=12, rotation='vertical')
plt.tight_layout(pad=1.0,h_pad=-1.0,w_pad=0.0)
fig.savefig(folder_name + "/" + "thumbnails_warm.eps", transparent=False)
plt.close(fig)
if __name__ == "__main__":
sns.set(palette='colorblind')
sns.set(font_scale=0.8)
sns.set_style(style='ticks')
starting_directory = os.getcwd()
os.chdir(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'tests')))
main_directory = os.getcwd()
geometryX, geometryY, xz_boundary = make_geometry_grounded(-0.01, 50000, -150, 50, 100, 10)
times = [0.5, 8.0, 15.5]
directories = ['warm_noad', 'warm']
dataName = 'width_timeseries'
data_names = [os.path.join(directory, dataName) for directory in directories]
snapshots = [[], []]
for i in range(len(directories)):
for j in range(len(times)):
if times[j] == int(0):
snapshots[i].append(int(0))
else:
os.chdir(directories[i])
reader_paraview = parasim.PVDReader(FileName=dataName + '.pvd')
times_imported = reader_paraview.GetPropertyValue('TimestepValues')
times_temp = 0.0
for k in range(len(times_imported)):
if times_imported[k] >= times[j] and times_temp <= times[j]:
snapshots[i].append(int(k))
break
else:
times_temp = times_imported[k]
os.chdir(main_directory)
os.chdir(main_directory)
print(snapshots)
makeImage(snapshots, geometryX, geometryY, data_names, starting_directory, times)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7906ae1847200efd26fbe70e67f61de4a3ca4af8 | size: 3,430 | ext: py | lang: Python
max_stars_repo: nova/api/openstack/compute/plugins/v3/pause_server.py @ vasart/nova (bca5004d367e0418e35f8a72fe0f2e106e977ab0), licenses ["Apache-2.0"], stars: 1, events: 2021-09-10T15:29:02.000Z to 2021-09-10T15:29:02.000Z
max_issues_repo: nova/api/openstack/compute/plugins/v3/pause_server.py @ PFZheng/nova (84be8abbccb5ddc2d7c5a7db59019ed1edb19e7f), licenses ["Apache-2.0"], issues: null, events: null to null
max_forks_repo: nova/api/openstack/compute/plugins/v3/pause_server.py @ PFZheng/nova (84be8abbccb5ddc2d7c5a7db59019ed1edb19e7f), licenses ["Apache-2.0"], forks: null, events: null to null
content:
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ALIAS = "os-pause-server"
def authorize(context, action_name):
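    # Compose the policy rule name "v3:os-pause-server:<action>" and enforce it for this request context.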
action = 'v3:%s:%s' % (ALIAS, action_name)
extensions.extension_authorizer('compute', action)(context)
class PauseServerController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(PauseServerController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@extensions.expected_errors((404, 409))
@wsgi.action('pause')
def _pause(self, req, id, body):
"""Permit Admins to pause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
server = common.get_instance(self.compute_api, ctxt, id,
want_objects=True)
try:
self.compute_api.pause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'pause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('unpause')
def _unpause(self, req, id, body):
"""Permit Admins to unpause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
server = common.get_instance(self.compute_api, ctxt, id,
want_objects=True)
try:
self.compute_api.unpause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unpause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
class PauseServer(extensions.V3APIExtensionBase):
"""Enable pause/unpause server actions."""
name = "PauseServer"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = PauseServerController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
avg_line_length: 36.88172 | max_line_length: 79 | alphanum_fraction: 0.680466
content_no_comment:
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ALIAS = "os-pause-server"
def authorize(context, action_name):
action = 'v3:%s:%s' % (ALIAS, action_name)
extensions.extension_authorizer('compute', action)(context)
class PauseServerController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(PauseServerController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@extensions.expected_errors((404, 409))
@wsgi.action('pause')
def _pause(self, req, id, body):
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
server = common.get_instance(self.compute_api, ctxt, id,
want_objects=True)
try:
self.compute_api.pause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'pause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('unpause')
def _unpause(self, req, id, body):
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
server = common.get_instance(self.compute_api, ctxt, id,
want_objects=True)
try:
self.compute_api.unpause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unpause')
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return webob.Response(status_int=202)
class PauseServer(extensions.V3APIExtensionBase):
name = "PauseServer"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = PauseServerController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7906af3a6870c8c32f4ef707eafb3a1c947bca32 | size: 1,428 | ext: py | lang: Python
max_stars_repo: aiothrift/errors.py @ achimnol/aiothrift (8d46e78e7d5f0c5eccf8e1afaf73786e2077b06b), licenses ["MIT"], stars: null, events: null to null
max_issues_repo: aiothrift/errors.py @ achimnol/aiothrift (8d46e78e7d5f0c5eccf8e1afaf73786e2077b06b), licenses ["MIT"], issues: null, events: null to null
max_forks_repo: aiothrift/errors.py @ achimnol/aiothrift (8d46e78e7d5f0c5eccf8e1afaf73786e2077b06b), licenses ["MIT"], forks: null, events: null to null
content:
from thriftpy2.thrift import TType
class ThriftError(Exception):
""" Base Exception defined by `aiothrift` """
class ConnectionClosedError(ThriftError):
"""Raised if connection to server was closed."""
class PoolClosedError(ThriftError):
"""Raised when operating on a closed thrift connection pool"""
class ThriftAppError(ThriftError):
"""Application level thrift exceptions."""
thrift_spec = {
1: (TType.STRING, 'message', False),
2: (TType.I32, 'type', False),
}
UNKNOWN = 0
UNKNOWN_METHOD = 1
INVALID_MESSAGE_TYPE = 2
WRONG_METHOD_NAME = 3
BAD_SEQUENCE_ID = 4
MISSING_RESULT = 5
INTERNAL_ERROR = 6
PROTOCOL_ERROR = 7
def __init__(self, type=UNKNOWN, message=None):
super().__init__()
self.type = type
self.message = message
def __str__(self):
if self.message:
return self.message
if self.type == self.UNKNOWN_METHOD:
return 'Unknown method'
elif self.type == self.INVALID_MESSAGE_TYPE:
return 'Invalid message type'
elif self.type == self.WRONG_METHOD_NAME:
return 'Wrong method name'
elif self.type == self.BAD_SEQUENCE_ID:
return 'Bad sequence ID'
elif self.type == self.MISSING_RESULT:
return 'Missing result'
else:
return 'Default (unknown) TApplicationException'
avg_line_length: 26.444444 | max_line_length: 66 | alphanum_fraction: 0.633053
content_no_comment:
from thriftpy2.thrift import TType
class ThriftError(Exception):
    pass
class ConnectionClosedError(ThriftError):
    pass
class PoolClosedError(ThriftError):
    pass
class ThriftAppError(ThriftError):
thrift_spec = {
1: (TType.STRING, 'message', False),
2: (TType.I32, 'type', False),
}
UNKNOWN = 0
UNKNOWN_METHOD = 1
INVALID_MESSAGE_TYPE = 2
WRONG_METHOD_NAME = 3
BAD_SEQUENCE_ID = 4
MISSING_RESULT = 5
INTERNAL_ERROR = 6
PROTOCOL_ERROR = 7
def __init__(self, type=UNKNOWN, message=None):
super().__init__()
self.type = type
self.message = message
def __str__(self):
if self.message:
return self.message
if self.type == self.UNKNOWN_METHOD:
return 'Unknown method'
elif self.type == self.INVALID_MESSAGE_TYPE:
return 'Invalid message type'
elif self.type == self.WRONG_METHOD_NAME:
return 'Wrong method name'
elif self.type == self.BAD_SEQUENCE_ID:
return 'Bad sequence ID'
elif self.type == self.MISSING_RESULT:
return 'Missing result'
else:
return 'Default (unknown) TApplicationException'
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7906af5dc7d6b3501b3f0ae5b91b007ee13c7bdf | size: 1,077 | ext: py | lang: Python
max_stars_repo: MDRSREID/utils/data_utils/transforms/torch_transforms/__init__.py @ nickhuang1996/HJL-re-id (107b25f31c961f360f69560cfddd78dfc0da3291), licenses ["MIT"], stars: 43, events: 2020-09-20T09:40:04.000Z to 2022-03-29T11:25:22.000Z
max_issues_repo: MDRSREID/utils/data_utils/transforms/torch_transforms/__init__.py @ nickhuang1996/HJL-re-id (107b25f31c961f360f69560cfddd78dfc0da3291), licenses ["MIT"], issues: 19, events: 2020-10-05T05:35:38.000Z to 2021-12-10T03:17:31.000Z
max_forks_repo: MDRSREID/utils/data_utils/transforms/torch_transforms/__init__.py @ nickhuang1996/HJL-re-id (107b25f31c961f360f69560cfddd78dfc0da3291), licenses ["MIT"], forks: 18, events: 2020-10-01T14:41:53.000Z to 2021-09-02T06:57:57.000Z
content:
from .hflip import hflip
from .resize import resize
from .pad import pad
from .random_crop import random_crop
from .to_tensor import to_tensor
from .random_erasing import random_erasing
from .random_sized_rect_crop import random_sized_rect_crop
def transforms(item, cfg, mode):
"""
:param item: sample = deepcopy(self.items[index])
:param cfg: cfg
:return:
eval() transform str to list, dict, tuple. Here is a series of the transform methods in turn.
"""
transforms_dataset_factory = {
'train': cfg.dataset.train,
'test': cfg.dataset.test
}
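    # Each name in the configured transform lists refers to one of the functions imported above;
    # eval() builds the call '<name>(item, cfg)' and applies it to the item in sequence.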
if transforms_dataset_factory[mode].before_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].before_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
item = to_tensor(item, cfg)
if transforms_dataset_factory[mode].after_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].after_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
return item
avg_line_length: 34.741935 | max_line_length: 97 | alphanum_fraction: 0.717734
content_no_comment:
from .hflip import hflip
from .resize import resize
from .pad import pad
from .random_crop import random_crop
from .to_tensor import to_tensor
from .random_erasing import random_erasing
from .random_sized_rect_crop import random_sized_rect_crop
def transforms(item, cfg, mode):
transforms_dataset_factory = {
'train': cfg.dataset.train,
'test': cfg.dataset.test
}
if transforms_dataset_factory[mode].before_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].before_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
item = to_tensor(item, cfg)
if transforms_dataset_factory[mode].after_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].after_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
return item
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7906af9f7b6248cbd3ecd63984c6b97e4c8ac39a | size: 12,634 | ext: py | lang: Python
max_stars_repo: Ransomulator/ransomulator.py @ naul1/BloodHound-Tools (3b2dfcfbae0b64de32daabcd6fe1c9ac205c62a8), licenses ["Apache-2.0"], stars: null, events: null to null
max_issues_repo: Ransomulator/ransomulator.py @ naul1/BloodHound-Tools (3b2dfcfbae0b64de32daabcd6fe1c9ac205c62a8), licenses ["Apache-2.0"], issues: null, events: null to null
max_forks_repo: Ransomulator/ransomulator.py @ naul1/BloodHound-Tools (3b2dfcfbae0b64de32daabcd6fe1c9ac205c62a8), licenses ["Apache-2.0"], forks: null, events: null to null
content:
from neo4j import GraphDatabase
from argparse import ArgumentParser
from concurrent.futures import ThreadPoolExecutor,as_completed,thread
import sys
import csv
from time import time
PRACTICAL = 'practical'
LOGICAL = 'logical'
NETONLY = 'netonly'
ALL = 'all'
PRIVS = 'privileged'
rans = None
def time_to_str(total_time):
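    # Convert an elapsed time in seconds into an "HH:MM:SS.ss" string.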
hours, rem = divmod(total_time, 3600)
minutes, seconds = divmod(rem, 60)
return "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds)
class ransomulator(object):
def __init__(self,user,password,url,maxwaves,edges,simulate,start_hosts,workers=25):
self.url = url
self.username = user
self.password = password
self.use_encryption = False
self.driver = None
self.connected = False
self.maxwaves = 1 if LOGICAL in simulate else maxwaves
self.session = None
self.edges = edges
self.simulate = simulate
self.workers = workers
self.executor = ThreadPoolExecutor(max_workers=workers)
self.start_hosts = start_hosts
def connect(self):
self.connected = False
if self.driver is not None:
self.driver.close()
try:
self.driver = GraphDatabase.driver(
self.url, auth=(self.username, self.password), encrypted=self.use_encryption)
self.connected = True
print("Database Connection Successful.")
except:
self.connected = False
print("Database Connection Failed.")
return self.connected
def get_start_computers(self):
if(self.start_hosts == ALL):
print("Collecting all computer nodes from database...")
result = self.session.run("MATCH (c:Computer) RETURN DISTINCT id(c) AS computer_id, c.name AS computer_name")
else:
print("Collecting computer nodes who have privileged user session from database...")
result = self.session.run("MATCH(g:Group)-[:AdminTo]->(c:Computer) WITH DISTINCT g MATCH ShortestPath((u:User)-[:MemberOf*0..]->(g)) WITH DISTINCT u as privU MATCH(c: Computer)-[: HasSession]->(privU) RETURN DISTINCT c.name AS computer_name")
computers = []
for record in result:
computers.append(record["computer_name"])
return computers
def count_computers(self):
result = self.session.run("MATCH (c:Computer) RETURN count(DISTINCT id(c)) as num_computers")
for record in result:
return record['num_computers']
def generate_wave_query_string(self):
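        # Build the Cypher query for one infection wave, depending on the simulation mode:
        # 'logical' follows HasSession/MemberOf/AdminTo paths, 'netonly' follows Open (network)
        # edges only, and 'practical' requires an Open edge plus an admin path via a session
        # on the already-infected source host.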
if LOGICAL in self.simulate:
return 'MATCH shortestPath((src:Computer)-[: HasSession | MemberOf | AdminTo * 1..]->(dest:Computer)) WHERE src <> dest AND src.name IN $last_wave AND NOT dest IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
elif NETONLY in self.simulate:
return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
elif PRACTICAL in self.simulate:
return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave WITH src,dest MATCH (src)-[:HasSession]->(u:User) WITH dest,u MATCH shortestPath((u)-[:MemberOf|AdminTo*1..]->(dest)) RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
else:
return None
def simulate_wave_for_computer(self,computer_name):
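        # Starting from a single computer, repeatedly query the database for the next wave of
        # newly reachable hosts until a wave comes back empty or self.maxwaves is reached.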
last_wave = [computer_name]
computer_waves = [computer_name]
waves = []
total = 0
for wave in range(self.maxwaves):
w_str = self.generate_wave_query_string()
mysession = self.driver.session()
result = mysession.run(w_str,last_wave=last_wave)
for record in result:
next_wave = record["next_wave"]
wave_size = len(next_wave)
total += wave_size
waves.append(str(wave_size))
last_wave += next_wave
if wave_size == 0:
mysession.close()
                    return total, waves, computer_waves
computer_waves.append(last_wave.copy())
mysession.close()
        return total, waves, computer_waves
def somulate(self):
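        # Fan the per-computer wave simulation out across the thread pool, then aggregate wave
        # statistics and a fragility score (total compromised hosts relative to the squared
        # number of computers in the environment, expressed as a percentage).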
waves_dict = {}
max_wavelen = 0
avg_wavelen = 0
max_total = 0
total_comps= 0
computers_in_environment = 0
score = 0
try:
if not self.connected:
print("Can't simulate without a valid DB connection!")
else:
self.session = self.driver.session()
computers = self.get_start_computers()
print("Running simulation...")
computers_in_environment = self.count_computers()
future_to_totals_waves_pairs = {self.executor.submit(self.simulate_wave_for_computer,computer): computer for computer in computers}
for future in as_completed(future_to_totals_waves_pairs):
computer = future_to_totals_waves_pairs[future]
try:
total_waves_pair = future.result()
total = total_waves_pair[0]
waves = total_waves_pair[1]
score += total
if total > 0:
total_comps += 1
if len(waves) > max_wavelen:
max_wavelen = len(waves)
if total > max_total: max_total = total
avg_wavelen += len(waves)
waves_dict[computer] = {"total":total,"waves":waves}
print("{},{},{}".format(computer,str(total),",".join(waves)))
else:
waves_dict[computer] = {"total": 0, "waves": ['0']}
print("{} - no waves".format(computer))
except Exception as exc:
print('Exception while processing %s: %s' % (computer, exc))
if total_comps > 0:
avg_wavelen = avg_wavelen / total_comps
score = round((score / (computers_in_environment**2))*100)
else:
avg_wavelen = 0
sorted_waves = {k: v for k,v in sorted(waves_dict.items(),key=lambda item: item[1]["total"],reverse=True)}
return sorted_waves,max_wavelen,avg_wavelen,max_total,total_comps,computers_in_environment,score
except Exception as err:
print("Error during simulation: {}".format(err))
def get_waves_for_computer(self, computer):
try:
if not self.connected:
print("Can't create query without a valid DB connection!")
else:
self.session = self.driver.session()
total,waves,computer_waves = self.simulate_wave_for_computer(computer)
return computer_waves
except Exception as err:
print("Error during simulation: {}".format(err))
def stop(self):
print("Stopping execution...")
self.executor._threads.clear()
thread._threads_queues.clear()
print("Execution stopped...")
def output_csv(file_path,wv_dict,max_wave_len):
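    # Write one CSV row per start host: hostname, total compromised hosts, and the size of each wave.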
print("Writing results to file {}".format(file_path))
with open(file_path,'w',encoding="utf-8",newline='') as csvfile:
wave_headers = ['wave_' + str(x + 1) for x in range(max_wave_len)]
header = ['Hostname','Total'] + wave_headers
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(header)
for k in wv_dict:
row = [k,wv_dict[k]["total"]] + wv_dict[k]["waves"]
writer.writerow(row)
def simulate(user,password,url,maxwaves,edges,simulate,workers,start_hosts):
global rans
start_time = time()
rans = ransomulator(user, password, url, maxwaves, edges, simulate,start_hosts,workers)
if rans.connect():
sorted_waves, max_wavelen, avg_wavelen, max_total, total_comps, num_of_computers, score = rans.somulate()
if outfile:
output_csv(outfile, sorted_waves, max_wavelen)
    else:
        print("Error during connection...")
        return
elapsed = time_to_str(time() - start_time)
print("Ransomulator done: {}".format(elapsed))
print("-----------------------------")
print("Fragility score:\t{}%".format(score))
print("Max number of computers:\t{}".format(num_of_computers))
print("Total computers with paths:\t{}".format(total_comps))
print("Max compromised :\t{}".format(max_total))
print("Avg wave length:\t{}".format(round(avg_wavelen, 1)))
print("Max wave length:\t{}".format(max_wavelen))
def create_query(computer,user, password, url, maxwaves, edges, simulate):
if LOGICAL in simulate:
return 'MATCH shortestPath((src:Computer)-[:HasSession|MemberOf|AdminTo* 1..]->(dest:Computer)) WHERE src <> dest AND src.name IN $last_wave AND NOT dest IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
elif NETONLY in simulate:
return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
elif PRACTICAL in simulate:
return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave WITH src,dest MATCH (src)-[:HasSession]->(u:User) WITH dest,u MATCH shortestPath((u)-[:MemberOf|AdminTo*1..]->(dest)) RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
else:
return None
def parse_args():
parser = ArgumentParser(prog=ArgumentParser().prog,prefix_chars="-/",add_help=False,description="Simulate ransomware infection through Bloodhound's database")
parser.add_argument('-h', '--help', '/?', '/h', '/help', action='help', help='show this help message and exit')
parser.add_argument('-s', '--simulate', metavar='', dest='simulate', choices=[PRACTICAL, LOGICAL, NETONLY],default=LOGICAL,help='type of lateral movement to simulate. choices: [%(choices)s], (default: logical).')
parser.add_argument('-c', '--computers', metavar='', dest='computers', choices=[ALL,PRIVS], default=ALL, help='which computer edges should be considered as the starting point. choices: [%(choices)s], (default: all)')
parser.add_argument("-u", "--user", dest='user', metavar='', help="Neo4j DB user name", type=str, default="neo4j")
parser.add_argument("-p", "--pass", dest='password', metavar='', help="Neo4j DB password", type=str,default="neo4j")
parser.add_argument("-l", "--url", dest="url", metavar="", help="Neo4j URL", default="bolt://localhost:7687",type=str)
parser.add_argument("-m", "--maxwaves", dest="maxwaves", type=int, default=3,help="maximal number of simulated attack waves")
parser.add_argument("-o", "--output", dest='out_file', metavar='', help="output file name", type=str,default=None)
parser.add_argument("-e","--edges", dest="edges", type=str,default="MemberOf",help="Logical edges between hosts")
parser.add_argument("-w","--workers",dest="workers",type=int,default=25,help="Number of paraller queries to the database")
subprasers = parser.add_subparsers(dest="command")
# sim_parser = subprasers.add_parser('simulate',help='simulate infection waves')
q_parser = subprasers.add_parser('query',help='generate Cypher query')
q_parser.add_argument("computer", type=str, help="starting from computer name")
# parser.add_argument("-a", "--all", dest="do_all", action="store_true", help="Run through all nodes")
args = parser.parse_args()
return args
if __name__ == '__main__':
try:
args = parse_args()
command = args.command
sim = args.simulate
user = args.user
password = args.password
url = args.url
maxwaves = args.maxwaves
edges = args.edges
outfile = args.out_file
workers = args.workers
start_hosts = args.computers
if command and "query" in command:
computer = args.computer
print(create_query(computer,user, password, url, maxwaves, edges, sim))
else:
simulate(user, password, url, maxwaves, edges, sim,workers,start_hosts)
except KeyboardInterrupt:
print("Interrupted! exiting...")
if rans:
rans.stop()
except Exception as err:
print("Exception thrown: {}".format(err))
finally:
sys.exit()
avg_line_length: 45.941818 | max_line_length: 294 | alphanum_fraction: 0.620389
content_no_comment:
from neo4j import GraphDatabase
from argparse import ArgumentParser
from concurrent.futures import ThreadPoolExecutor,as_completed,thread
import sys
import csv
from time import time
PRACTICAL = 'practical'
LOGICAL = 'logical'
NETONLY = 'netonly'
ALL = 'all'
PRIVS = 'privileged'
rans = None
def time_to_str(total_time):
hours, rem = divmod(total_time, 3600)
minutes, seconds = divmod(rem, 60)
return "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds)
class ransomulator(object):
def __init__(self,user,password,url,maxwaves,edges,simulate,start_hosts,workers=25):
self.url = url
self.username = user
self.password = password
self.use_encryption = False
self.driver = None
self.connected = False
self.maxwaves = 1 if LOGICAL in simulate else maxwaves
self.session = None
self.edges = edges
self.simulate = simulate
self.workers = workers
self.executor = ThreadPoolExecutor(max_workers=workers)
self.start_hosts = start_hosts
def connect(self):
self.connected = False
if self.driver is not None:
self.driver.close()
try:
self.driver = GraphDatabase.driver(
self.url, auth=(self.username, self.password), encrypted=self.use_encryption)
self.connected = True
print("Database Connection Successful.")
except:
self.connected = False
print("Database Connection Failed.")
return self.connected
def get_start_computers(self):
if(self.start_hosts == ALL):
print("Collecting all computer nodes from database...")
result = self.session.run("MATCH (c:Computer) RETURN DISTINCT id(c) AS computer_id, c.name AS computer_name")
else:
print("Collecting computer nodes who have privileged user session from database...")
result = self.session.run("MATCH(g:Group)-[:AdminTo]->(c:Computer) WITH DISTINCT g MATCH ShortestPath((u:User)-[:MemberOf*0..]->(g)) WITH DISTINCT u as privU MATCH(c: Computer)-[: HasSession]->(privU) RETURN DISTINCT c.name AS computer_name")
computers = []
for record in result:
computers.append(record["computer_name"])
return computers
def count_computers(self):
result = self.session.run("MATCH (c:Computer) RETURN count(DISTINCT id(c)) as num_computers")
for record in result:
return record['num_computers']
def generate_wave_query_string(self):
if LOGICAL in self.simulate:
return 'MATCH shortestPath((src:Computer)-[: HasSession | MemberOf | AdminTo * 1..]->(dest:Computer)) WHERE src <> dest AND src.name IN $last_wave AND NOT dest IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
elif NETONLY in self.simulate:
return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
elif PRACTICAL in self.simulate:
return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave WITH src,dest MATCH (src)-[:HasSession]->(u:User) WITH dest,u MATCH shortestPath((u)-[:MemberOf|AdminTo*1..]->(dest)) RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
else:
return None
def simulate_wave_for_computer(self,computer_name):
last_wave = [computer_name]
computer_waves = [computer_name]
waves = []
total = 0
for wave in range(self.maxwaves):
w_str = self.generate_wave_query_string()
mysession = self.driver.session()
result = mysession.run(w_str,last_wave=last_wave)
for record in result:
next_wave = record["next_wave"]
wave_size = len(next_wave)
total += wave_size
waves.append(str(wave_size))
last_wave += next_wave
if wave_size == 0:
mysession.close()
                    return total, waves, computer_waves
computer_waves.append(last_wave.copy())
mysession.close()
        return total, waves, computer_waves
def somulate(self):
waves_dict = {}
max_wavelen = 0
avg_wavelen = 0
max_total = 0
total_comps= 0
computers_in_environment = 0
score = 0
try:
if not self.connected:
print("Can't simulate without a valid DB connection!")
else:
self.session = self.driver.session()
computers = self.get_start_computers()
print("Running simulation...")
computers_in_environment = self.count_computers()
future_to_totals_waves_pairs = {self.executor.submit(self.simulate_wave_for_computer,computer): computer for computer in computers}
for future in as_completed(future_to_totals_waves_pairs):
computer = future_to_totals_waves_pairs[future]
try:
total_waves_pair = future.result()
total = total_waves_pair[0]
waves = total_waves_pair[1]
score += total
if total > 0:
total_comps += 1
if len(waves) > max_wavelen:
max_wavelen = len(waves)
if total > max_total: max_total = total
avg_wavelen += len(waves)
waves_dict[computer] = {"total":total,"waves":waves}
print("{},{},{}".format(computer,str(total),",".join(waves)))
else:
waves_dict[computer] = {"total": 0, "waves": ['0']}
print("{} - no waves".format(computer))
except Exception as exc:
print('Exception while processing %s: %s' % (computer, exc))
if total_comps > 0:
avg_wavelen = avg_wavelen / total_comps
score = round((score / (computers_in_environment**2))*100)
else:
avg_wavelen = 0
sorted_waves = {k: v for k,v in sorted(waves_dict.items(),key=lambda item: item[1]["total"],reverse=True)}
return sorted_waves,max_wavelen,avg_wavelen,max_total,total_comps,computers_in_environment,score
except Exception as err:
print("Error during simulation: {}".format(err))
def get_waves_for_computer(self, computer):
try:
if not self.connected:
print("Can't create query without a valid DB connection!")
else:
self.session = self.driver.session()
total,waves,computer_waves = self.simulate_wave_for_computer(computer)
return computer_waves
except Exception as err:
print("Error during simulation: {}".format(err))
def stop(self):
print("Stopping execution...")
self.executor._threads.clear()
thread._threads_queues.clear()
print("Execution stopped...")
def output_csv(file_path,wv_dict,max_wave_len):
print("Writing results to file {}".format(file_path))
with open(file_path,'w',encoding="utf-8",newline='') as csvfile:
wave_headers = ['wave_' + str(x + 1) for x in range(max_wave_len)]
header = ['Hostname','Total'] + wave_headers
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(header)
for k in wv_dict:
row = [k,wv_dict[k]["total"]] + wv_dict[k]["waves"]
writer.writerow(row)
def simulate(user,password,url,maxwaves,edges,simulate,workers,start_hosts):
global rans
start_time = time()
rans = ransomulator(user, password, url, maxwaves, edges, simulate,start_hosts,workers)
if rans.connect():
sorted_waves, max_wavelen, avg_wavelen, max_total, total_comps, num_of_computers, score = rans.somulate()
if outfile:
output_csv(outfile, sorted_waves, max_wavelen)
    else:
        print("Error during connection...")
        return
elapsed = time_to_str(time() - start_time)
print("Ransomulator done: {}".format(elapsed))
print("-----------------------------")
print("Fragility score:\t{}%".format(score))
print("Max number of computers:\t{}".format(num_of_computers))
print("Total computers with paths:\t{}".format(total_comps))
print("Max compromised :\t{}".format(max_total))
print("Avg wave length:\t{}".format(round(avg_wavelen, 1)))
print("Max wave length:\t{}".format(max_wavelen))
def create_query(computer,user, password, url, maxwaves, edges, simulate):
if LOGICAL in simulate:
return 'MATCH shortestPath((src:Computer)-[:HasSession|MemberOf|AdminTo* 1..]->(dest:Computer)) WHERE src <> dest AND src.name IN $last_wave AND NOT dest IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
elif NETONLY in simulate:
return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
elif PRACTICAL in simulate:
return 'MATCH (src:Computer)-[:Open]->(dest:Computer) WHERE src.name IN $last_wave AND NOT dest.name IN $last_wave WITH src,dest MATCH (src)-[:HasSession]->(u:User) WITH dest,u MATCH shortestPath((u)-[:MemberOf|AdminTo*1..]->(dest)) RETURN COLLECT(DISTINCT(dest.name)) AS next_wave'
else:
return None
def parse_args():
parser = ArgumentParser(prog=ArgumentParser().prog,prefix_chars="-/",add_help=False,description="Simulate ransomware infection through Bloodhound's database")
parser.add_argument('-h', '--help', '/?', '/h', '/help', action='help', help='show this help message and exit')
parser.add_argument('-s', '--simulate', metavar='', dest='simulate', choices=[PRACTICAL, LOGICAL, NETONLY],default=LOGICAL,help='type of lateral movement to simulate. choices: [%(choices)s], (default: logical).')
parser.add_argument('-c', '--computers', metavar='', dest='computers', choices=[ALL,PRIVS], default=ALL, help='which computer edges should be considered as the starting point. choices: [%(choices)s], (default: all)')
parser.add_argument("-u", "--user", dest='user', metavar='', help="Neo4j DB user name", type=str, default="neo4j")
parser.add_argument("-p", "--pass", dest='password', metavar='', help="Neo4j DB password", type=str,default="neo4j")
parser.add_argument("-l", "--url", dest="url", metavar="", help="Neo4j URL", default="bolt://localhost:7687",type=str)
parser.add_argument("-m", "--maxwaves", dest="maxwaves", type=int, default=3,help="maximal number of simulated attack waves")
parser.add_argument("-o", "--output", dest='out_file', metavar='', help="output file name", type=str,default=None)
parser.add_argument("-e","--edges", dest="edges", type=str,default="MemberOf",help="Logical edges between hosts")
parser.add_argument("-w","--workers",dest="workers",type=int,default=25,help="Number of paraller queries to the database")
subprasers = parser.add_subparsers(dest="command")
# sim_parser = subprasers.add_parser('simulate',help='simulate infection waves')
q_parser = subprasers.add_parser('query',help='generate Cypher query')
q_parser.add_argument("computer", type=str, help="starting from computer name")
# parser.add_argument("-a", "--all", dest="do_all", action="store_true", help="Run through all nodes")
args = parser.parse_args()
return args
if __name__ == '__main__':
try:
args = parse_args()
command = args.command
sim = args.simulate
user = args.user
password = args.password
url = args.url
maxwaves = args.maxwaves
edges = args.edges
outfile = args.out_file
workers = args.workers
start_hosts = args.computers
if command and "query" in command:
computer = args.computer
print(create_query(computer,user, password, url, maxwaves, edges, sim))
else:
simulate(user, password, url, maxwaves, edges, sim,workers,start_hosts)
except KeyboardInterrupt:
print("Interrupted! exiting...")
if rans:
rans.stop()
except Exception as err:
print("Exception thrown: {}".format(err))
finally:
sys.exit()
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7906b06464613a5924f46ed3e2eb398049fa0b75 | size: 598 | ext: py | lang: Python
max_stars_repo: pfrl/wrappers/__init__.py @ g-votte/pfrl (4c30c1d73f0941a2b649b62937eec346bb55a95e), licenses ["MIT"], stars: 1, events: 2021-07-07T04:23:56.000Z to 2021-07-07T04:23:56.000Z
max_issues_repo: pfrl/wrappers/__init__.py @ g-votte/pfrl (4c30c1d73f0941a2b649b62937eec346bb55a95e), licenses ["MIT"], issues: null, events: null to null
max_forks_repo: pfrl/wrappers/__init__.py @ g-votte/pfrl (4c30c1d73f0941a2b649b62937eec346bb55a95e), licenses ["MIT"], forks: null, events: null to null
content:
from pfrl.wrappers.cast_observation import CastObservation # NOQA
from pfrl.wrappers.cast_observation import CastObservationToFloat32 # NOQA
from pfrl.wrappers.continuing_time_limit import ContinuingTimeLimit # NOQA
from pfrl.wrappers.monitor import Monitor # NOQA
from pfrl.wrappers.normalize_action_space import NormalizeActionSpace # NOQA
from pfrl.wrappers.randomize_action import RandomizeAction # NOQA
from pfrl.wrappers.render import Render # NOQA
from pfrl.wrappers.scale_reward import ScaleReward # NOQA
from pfrl.wrappers.vector_frame_stack import VectorFrameStack # NOQA
avg_line_length: 35.176471 | max_line_length: 77 | alphanum_fraction: 0.837793
content_no_comment:
from pfrl.wrappers.cast_observation import CastObservation
from pfrl.wrappers.cast_observation import CastObservationToFloat32
from pfrl.wrappers.continuing_time_limit import ContinuingTimeLimit
from pfrl.wrappers.monitor import Monitor
from pfrl.wrappers.normalize_action_space import NormalizeActionSpace
from pfrl.wrappers.randomize_action import RandomizeAction
from pfrl.wrappers.render import Render
from pfrl.wrappers.scale_reward import ScaleReward
from pfrl.wrappers.vector_frame_stack import VectorFrameStack
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7906b0e143504da4fc4f5976052ebd0cd9d3a193 | size: 3,552 | ext: py | lang: Python
max_stars_repo: chp/babel/bkb-service.py @ di2ag/bkb-pathway-provider (42824f22868c5c5d777da3facb4209744bcc6f96), licenses ["MIT"], stars: null, events: null to null
max_issues_repo: chp/babel/bkb-service.py @ di2ag/bkb-pathway-provider (42824f22868c5c5d777da3facb4209744bcc6f96), licenses ["MIT"], issues: 7, events: 2021-01-13T22:25:46.000Z to 2021-07-29T15:26:06.000Z
max_forks_repo: chp/babel/bkb-service.py @ NCATSTranslator/chp (00668fd3d50a48fdd75abbeacaf173a3ad41942d), licenses ["Apache-2.0"], forks: 2, events: 2021-01-14T19:06:24.000Z to 2021-01-26T15:02:12.000Z
content:
'''
Source code developed by DI2AG.
Thayer School of Engineering at Dartmouth College
Authors: Dr. Eugene Santos, Jr
Mr. Chase Yakaboski,
Mr. Gregory Hyde,
Dr. Keum Joo Kim
'''
import json
import argparse
import os
import sys
import pickle
import subprocess
from chp.query import Query
PASSED_JSON_FILE = '/home/cyakaboski/passed_message.json'
NODE = 'c-dell-m630-0-11'
SAVE_DIR = '/home/cyakaboski/temp'
BKB_PATHWAY_CORE_DIR = '/home/cyakaboski/src/python/projects/bkb-pathway-provider/core'
'''
PASSED_JSON_FILE = '/home/ncats/passed_message.json'
NODE = 'c-dell-m630-0-11'
SAVE_DIR = '/home/ncats/tmp'
BKB_PATHWAY_CORE_DIR = '/home/ncats/live/core'
'''
def processUiQuery(dict_):
query_dict = dict()
query_dict['name'] = dict_['name']
query_dict['evidence'] = dict_['genetic_evidence']
query_dict['targets'] = dict_['genetic_targets']
if dict_['demographic_evidence'] is not None:
query_dict['meta_evidence'] = [tuple(demo) for demo in dict_['demographic_evidence']]
else:
query_dict['meta_evidence'] = None
if dict_['demographic_targets'] is not None:
query_dict['meta_targets'] = [tuple(demo) for demo in dict_['demographic_targets']]
else:
query_dict['meta_targets'] = None
query = Query(**query_dict)
return query
def consumeJsonFile(file_name):
with open(file_name, 'r') as passed_file:
query_dict = json.load(passed_file)
os.system('rm {}'.format(file_name))
return query_dict
def runOnNode(query, node_name, save_dir):
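    # Serialize the query to save_dir, run the reasoning driver on node_name over ssh against the
    # pickled query, and return the path of the JSON file that collectResults() reads back later.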
pickle_file, json_file = query.save(save_dir)
command = ['ssh', node_name,
'python3', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.py'),
'--config_file', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.config'),
'--headless',
'--query_file', pickle_file,
'--save_dir', save_dir]
subprocess.run(command)
return json_file
def makeVariableJsonFile(save_dir, node_name):
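    # Ask the remote driver (over ssh) to dump its BKB variables to a pickle file, then load and return them.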
vars_file = os.path.join(save_dir, 'bkb_variables.pk')
command = ['ssh', node_name,
'python3', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.py'),
'--config_file', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.config'),
'--get_variables', vars_file]
subprocess.run(command)
#--Collect vars_dict from vars_file
with open(vars_file, 'rb') as f_:
vars_dict = pickle.load(f_)
return vars_dict
def collectResults(query_file):
with open(query_file) as f_:
query_res_dict = json.load(f_)
return query_res_dict
def sendJson(results):
print('Begin-JSON------')
print(json.JSONEncoder().encode(results))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--f', default=None, type=str)
parser.add_argument('--get_variables', action='store_true')
args = parser.parse_args()
if args.f is not None:
#-- Consume JSON File passed by UI
query_dict = consumeJsonFile(args.f)
#-- Process the passed JSON file into recognized and runnable Query
query = processUiQuery(query_dict)
#-- Analyze the Query and run reasoning on a specified dell node.
saved_query_file = runOnNode(query, NODE, SAVE_DIR)
#-- Load JSON result file and send back over ssh
res_json = collectResults(saved_query_file)
sendJson(res_json)
elif args.get_variables:
vars_dict = makeVariableJsonFile(SAVE_DIR, NODE)
sendJson(vars_dict)
avg_line_length: 32.290909 | max_line_length: 93 | alphanum_fraction: 0.670045
content_no_comment:
import json
import argparse
import os
import sys
import pickle
import subprocess
from chp.query import Query
PASSED_JSON_FILE = '/home/cyakaboski/passed_message.json'
NODE = 'c-dell-m630-0-11'
SAVE_DIR = '/home/cyakaboski/temp'
BKB_PATHWAY_CORE_DIR = '/home/cyakaboski/src/python/projects/bkb-pathway-provider/core'
def processUiQuery(dict_):
query_dict = dict()
query_dict['name'] = dict_['name']
query_dict['evidence'] = dict_['genetic_evidence']
query_dict['targets'] = dict_['genetic_targets']
if dict_['demographic_evidence'] is not None:
query_dict['meta_evidence'] = [tuple(demo) for demo in dict_['demographic_evidence']]
else:
query_dict['meta_evidence'] = None
if dict_['demographic_targets'] is not None:
query_dict['meta_targets'] = [tuple(demo) for demo in dict_['demographic_targets']]
else:
query_dict['meta_targets'] = None
query = Query(**query_dict)
return query
def consumeJsonFile(file_name):
with open(file_name, 'r') as passed_file:
query_dict = json.load(passed_file)
os.system('rm {}'.format(file_name))
return query_dict
def runOnNode(query, node_name, save_dir):
pickle_file, json_file = query.save(save_dir)
command = ['ssh', node_name,
'python3', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.py'),
'--config_file', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.config'),
'--headless',
'--query_file', pickle_file,
'--save_dir', save_dir]
subprocess.run(command)
return json_file
def makeVariableJsonFile(save_dir, node_name):
vars_file = os.path.join(save_dir, 'bkb_variables.pk')
command = ['ssh', node_name,
'python3', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.py'),
'--config_file', os.path.join(BKB_PATHWAY_CORE_DIR, 'driver.config'),
'--get_variables', vars_file]
subprocess.run(command)
with open(vars_file, 'rb') as f_:
vars_dict = pickle.load(f_)
return vars_dict
def collectResults(query_file):
with open(query_file) as f_:
query_res_dict = json.load(f_)
return query_res_dict
def sendJson(results):
print('Begin-JSON------')
print(json.JSONEncoder().encode(results))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--f', default=None, type=str)
parser.add_argument('--get_variables', action='store_true')
args = parser.parse_args()
if args.f is not None:
query_dict = consumeJsonFile(args.f)
query = processUiQuery(query_dict)
saved_query_file = runOnNode(query, NODE, SAVE_DIR)
res_json = collectResults(saved_query_file)
sendJson(res_json)
elif args.get_variables:
vars_dict = makeVariableJsonFile(SAVE_DIR, NODE)
sendJson(vars_dict)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7906b1c9c9b2cb9c0ee481bcc7b16dc8a067b502 | size: 1,513 | ext: py | lang: Python
max_stars_repo: collectors/icdpcs/collector.py @ almeidaah/collectors (f03096855b8d702969d22af0b20a4d6a0d820bd0), licenses ["MIT"], stars: 17, events: 2016-06-28T21:20:21.000Z to 2022-03-02T16:31:25.000Z
max_issues_repo: collectors/icdpcs/collector.py @ almeidaah/collectors (f03096855b8d702969d22af0b20a4d6a0d820bd0), licenses ["MIT"], issues: 41, events: 2016-04-04T10:36:45.000Z to 2017-04-24T10:04:57.000Z
max_forks_repo: collectors/icdpcs/collector.py @ kenferrara/collectors (e6c1f45df3a1ffd5d60dada1816484812eb51417), licenses ["MIT"], forks: 25, events: 2016-05-18T09:27:42.000Z to 2021-03-21T14:44:31.000Z
content:
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import logging
import zipfile
import requests
from .record import Record
logger = logging.getLogger(__name__)
# Module API
def collect(conf, conn):
"""Collect ICD-XX-PCS procedures.
"""
# For more information see:
# https://www.cms.gov/Medicare/Coding/ICD10/2016-ICD-10-PCS-and-GEMs.html
URL = 'https://www.cms.gov/Medicare/Coding/ICD10/Downloads/2016-PCS-Long-Abbrev-Titles.zip'
FILE = 'icd10pcs_order_2016.txt'
VERSION = 'ICD-10-PCS'
LAST_UPDATED = '2015-10-01'
# Prepare file
zip = requests.get(URL).content
file = zipfile.ZipFile(io.BytesIO(zip)).open(FILE)
count = 0
for line in file:
# Prepare data
# Format is described in instruction
# stored in zip archive we download
data = {
'code': line[6:6+7].strip(),
'is_header': line[14:14+1].strip(),
'short_description': line[16:16+60].strip(),
'long_description': line[77:].strip(),
'version': VERSION,
'last_updated': LAST_UPDATED,
}
# Create record
record = Record.create(URL, data)
# Write record
record.write(conf, conn)
# Log info
count += 1
if not count % 100:
logger.info('Collected %s "%s" interventions', count, record.table)
| 27.017857
| 95
| 0.62657
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import logging
import zipfile
import requests
from .record import Record
logger = logging.getLogger(__name__)
def collect(conf, conn):
URL = 'https://www.cms.gov/Medicare/Coding/ICD10/Downloads/2016-PCS-Long-Abbrev-Titles.zip'
FILE = 'icd10pcs_order_2016.txt'
VERSION = 'ICD-10-PCS'
LAST_UPDATED = '2015-10-01'
zip = requests.get(URL).content
file = zipfile.ZipFile(io.BytesIO(zip)).open(FILE)
count = 0
for line in file:
data = {
'code': line[6:6+7].strip(),
'is_header': line[14:14+1].strip(),
'short_description': line[16:16+60].strip(),
'long_description': line[77:].strip(),
'version': VERSION,
'last_updated': LAST_UPDATED,
}
record = Record.create(URL, data)
record.write(conf, conn)
count += 1
if not count % 100:
logger.info('Collected %s "%s" interventions', count, record.table)
| true
| true
|
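The collector above slices each fixed-width line of icd10pcs_order_2016.txt by column offset. A self-contained sketch of that layout follows, using a fabricated row purely to show where the slices fall (not a real ICD-10-PCS entry).

def parse_pcs_line(line):
    # Mirrors the slices used in collect() above.
    return {
        'code': line[6:6 + 7].strip(),
        'is_header': line[14:14 + 1].strip(),
        'short_description': line[16:16 + 60].strip(),
        'long_description': line[77:].strip(),
    }

# Placeholder row with the same column offsets; every value is made up.
row = (b' ' * 6 + b'XXXXXXX' + b' ' + b'0' + b' '
       + b'Example short title'.ljust(60) + b' ' + b'Example long description')
print(parse_pcs_line(row))
# {'code': b'XXXXXXX', 'is_header': b'0',
#  'short_description': b'Example short title',
#  'long_description': b'Example long description'}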
7906b272f32ab34bfbf3c74814bd934fbc3f5cc8
| 533
|
py
|
Python
|
src/ppb/cli/sub_cmd/_sub_command.py
|
Stibbons/python-project-bootstrap
|
b7956e272c4e36171b1d9f2fe9e7cbd271bd3b0d
|
[
"BSD-3-Clause"
] | null | null | null |
src/ppb/cli/sub_cmd/_sub_command.py
|
Stibbons/python-project-bootstrap
|
b7956e272c4e36171b1d9f2fe9e7cbd271bd3b0d
|
[
"BSD-3-Clause"
] | null | null | null |
src/ppb/cli/sub_cmd/_sub_command.py
|
Stibbons/python-project-bootstrap
|
b7956e272c4e36171b1d9f2fe9e7cbd271bd3b0d
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class SubCommand(object):
name = NotImplementedError("Please add 'name' member in your SubCommand")
help = NotImplementedError("Please add 'help' member in your SubCommand")
def addParser(self, parser):
raise NotImplementedError("Please implement 'addParser' method in your SubCommand")
def execute(self):
raise NotImplementedError("Please implement 'execute' method in your SubCommand")
| 35.533333
| 91
| 0.765478
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class SubCommand(object):
name = NotImplementedError("Please add 'name' member in your SubCommand")
help = NotImplementedError("Please add 'help' member in your SubCommand")
def addParser(self, parser):
raise NotImplementedError("Please implement 'addParser' method in your SubCommand")
def execute(self):
raise NotImplementedError("Please implement 'execute' method in your SubCommand")
| true
| true
|
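A hypothetical concrete sub-command built on the SubCommand base from the record above; the name and behaviour are illustrative only, and argument handling is assumed to go through argparse since the base class leaves it to the subclass.

import argparse

class HelloSubCommand(SubCommand):
    name = "hello"
    help = "Print a greeting"

    def addParser(self, parser):
        # 'parser' is assumed to be an argparse (sub)parser supplied by the CLI.
        parser.add_argument("--who", default="world")
        self._parser = parser

    def execute(self):
        args = self._parser.parse_args()
        print("Hello, {}!".format(args.who))

cmd = HelloSubCommand()
cmd.addParser(argparse.ArgumentParser(prog=cmd.name, description=cmd.help))
cmd.execute()  # e.g. run with `--who reader` -> "Hello, reader!"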
7906b276f5d1f2ed5dbd89e8be1217ecadfc7062
| 19,441
|
py
|
Python
|
test/test_json_util.py
|
nloadholtes/mongo-python-driver
|
2818a32855a53799b58343bff0a46c5227057b19
|
[
"Apache-2.0"
] | 1
|
2021-12-14T12:44:24.000Z
|
2021-12-14T12:44:24.000Z
|
test/test_json_util.py
|
nloadholtes/mongo-python-driver
|
2818a32855a53799b58343bff0a46c5227057b19
|
[
"Apache-2.0"
] | null | null | null |
test/test_json_util.py
|
nloadholtes/mongo-python-driver
|
2818a32855a53799b58343bff0a46c5227057b19
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test some utilities for working with JSON and PyMongo."""
import datetime
import json
import re
import sys
import uuid
sys.path[0:0] = [""]
from bson import json_util, EPOCH_AWARE, SON
from bson.json_util import (DatetimeRepresentation,
STRICT_JSON_OPTIONS)
from bson.binary import (ALL_UUID_REPRESENTATIONS, Binary, MD5_SUBTYPE,
USER_DEFINED_SUBTYPE, UuidRepresentation, STANDARD)
from bson.code import Code
from bson.dbref import DBRef
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.tz_util import FixedOffset, utc
from test import unittest, IntegrationTest
PY3 = sys.version_info[0] == 3
class TestJsonUtil(unittest.TestCase):
def round_tripped(self, doc, **kwargs):
return json_util.loads(json_util.dumps(doc, **kwargs), **kwargs)
def round_trip(self, doc, **kwargs):
self.assertEqual(doc, self.round_tripped(doc, **kwargs))
def test_basic(self):
self.round_trip({"hello": "world"})
def test_json_options_with_options(self):
opts = json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.NUMBERLONG)
self.assertEqual(
opts.datetime_representation, DatetimeRepresentation.NUMBERLONG)
opts2 = opts.with_options(
datetime_representation=DatetimeRepresentation.ISO8601)
self.assertEqual(
opts2.datetime_representation, DatetimeRepresentation.ISO8601)
opts = json_util.JSONOptions(strict_number_long=True)
self.assertEqual(opts.strict_number_long, True)
opts2 = opts.with_options(strict_number_long=False)
self.assertEqual(opts2.strict_number_long, False)
opts = json_util.CANONICAL_JSON_OPTIONS
self.assertNotEqual(
opts.uuid_representation, UuidRepresentation.JAVA_LEGACY)
opts2 = opts.with_options(
uuid_representation=UuidRepresentation.JAVA_LEGACY)
self.assertEqual(
opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY)
self.assertEqual(opts2.document_class, dict)
opts3 = opts2.with_options(document_class=SON)
self.assertEqual(
opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY)
self.assertEqual(opts3.document_class, SON)
def test_objectid(self):
self.round_trip({"id": ObjectId()})
def test_dbref(self):
self.round_trip({"ref": DBRef("foo", 5)})
self.round_trip({"ref": DBRef("foo", 5, "db")})
self.round_trip({"ref": DBRef("foo", ObjectId())})
# Check order.
self.assertEqual(
'{"$ref": "collection", "$id": 1, "$db": "db"}',
json_util.dumps(DBRef('collection', 1, 'db')))
def test_datetime(self):
# only millis, not micros
self.round_trip({"date": datetime.datetime(2009, 12, 9, 15,
49, 45, 191000, utc)})
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
# No explicit offset
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
# Localtime behind UTC
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
# Localtime ahead of UTC
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc)
jsn = '{"dt": {"$date": -62135593139000}}'
self.assertEqual(dtm, json_util.loads(jsn)["dt"])
jsn = '{"dt": {"$date": {"$numberLong": "-62135593139000"}}}'
self.assertEqual(dtm, json_util.loads(jsn)["dt"])
# Test dumps format
pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)}
post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)}
self.assertEqual(
'{"dt": {"$date": -62135593138990}}',
json_util.dumps(pre_epoch))
self.assertEqual(
'{"dt": {"$date": 63075661010}}',
json_util.dumps(post_epoch))
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS))
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS))
number_long_options = json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.NUMBERLONG)
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "63075661010"}}}',
json_util.dumps(post_epoch, json_options=number_long_options))
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=number_long_options))
# ISO8601 mode assumes naive datetimes are UTC
pre_epoch_naive = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000)}
post_epoch_naive = {
"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)}
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS))
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch_naive,
json_options=STRICT_JSON_OPTIONS))
# Test tz_aware and tzinfo options
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}')["dt"])
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=json_util.JSONOptions(tz_aware=True,
tzinfo=utc))["dt"])
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=json_util.JSONOptions(tz_aware=False))["dt"])
self.round_trip(pre_epoch_naive, json_options=json_util.JSONOptions(
tz_aware=False))
# Test a non-utc timezone
pacific = FixedOffset(-8 * 60, 'US/Pacific')
aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000,
pacific)}
self.assertEqual(
'{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}',
json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS))
self.round_trip(aware_datetime, json_options=json_util.JSONOptions(
tz_aware=True, tzinfo=pacific))
self.round_trip(aware_datetime, json_options=json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.ISO8601,
tz_aware=True, tzinfo=pacific))
def test_regex_object_hook(self):
# Extended JSON format regular expression.
pat = 'a*b'
json_re = '{"$regex": "%s", "$options": "u"}' % pat
loaded = json_util.object_hook(json.loads(json_re))
self.assertTrue(isinstance(loaded, Regex))
self.assertEqual(pat, loaded.pattern)
self.assertEqual(re.U, loaded.flags)
def test_regex(self):
for regex_instance in (
re.compile("a*b", re.IGNORECASE),
Regex("a*b", re.IGNORECASE)):
res = self.round_tripped({"r": regex_instance})["r"]
self.assertEqual("a*b", res.pattern)
res = self.round_tripped({"r": Regex("a*b", re.IGNORECASE)})["r"]
self.assertEqual("a*b", res.pattern)
self.assertEqual(re.IGNORECASE, res.flags)
unicode_options = re.I|re.M|re.S|re.U|re.X
regex = re.compile("a*b", unicode_options)
res = self.round_tripped({"r": regex})["r"]
self.assertEqual(unicode_options, res.flags)
# Some tools may not add $options if no flags are set.
res = json_util.loads('{"r": {"$regex": "a*b"}}')['r']
self.assertEqual(0, res.flags)
self.assertEqual(
Regex('.*', 'ilm'),
json_util.loads(
'{"r": {"$regex": ".*", "$options": "ilm"}}')['r'])
# Check order.
self.assertEqual(
'{"$regex": ".*", "$options": "mx"}',
json_util.dumps(Regex('.*', re.M | re.X)))
self.assertEqual(
'{"$regex": ".*", "$options": "mx"}',
json_util.dumps(re.compile(b'.*', re.M | re.X)))
def test_minkey(self):
self.round_trip({"m": MinKey()})
def test_maxkey(self):
self.round_trip({"m": MaxKey()})
def test_timestamp(self):
dct = {"ts": Timestamp(4, 13)}
res = json_util.dumps(dct, default=json_util.default)
rtdct = json_util.loads(res)
self.assertEqual(dct, rtdct)
self.assertEqual('{"ts": {"$timestamp": {"t": 4, "i": 13}}}', res)
def test_uuid(self):
doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')}
self.round_trip(doc)
self.assertEqual(
'{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}',
json_util.dumps(doc))
self.assertEqual(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}',
json_util.dumps(
doc, json_options=json_util.STRICT_JSON_OPTIONS))
self.assertEqual(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}',
json_util.dumps(
doc, json_options=json_util.JSONOptions(
strict_uuid=True, uuid_representation=STANDARD)))
self.assertEqual(
doc, json_util.loads(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}'))
for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) -
{UuidRepresentation.UNSPECIFIED}):
options = json_util.JSONOptions(
strict_uuid=True, uuid_representation=uuid_representation)
self.round_trip(doc, json_options=options)
# Ignore UUID representation when decoding BSON binary subtype 4.
self.assertEqual(doc, json_util.loads(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}',
json_options=options))
def test_uuid_uuid_rep_unspecified(self):
_uuid = uuid.uuid4()
options = json_util.JSONOptions(
strict_uuid=True,
uuid_representation=UuidRepresentation.UNSPECIFIED)
# Cannot directly encode native UUIDs with UNSPECIFIED.
doc = {'uuid': _uuid}
with self.assertRaises(ValueError):
json_util.dumps(doc, json_options=options)
# All UUID subtypes are decoded as Binary with UNSPECIFIED.
# subtype 3
doc = {'uuid': Binary(_uuid.bytes, subtype=3)}
ext_json_str = json_util.dumps(doc)
self.assertEqual(
doc, json_util.loads(ext_json_str, json_options=options))
# subtype 4
doc = {'uuid': Binary(_uuid.bytes, subtype=4)}
ext_json_str = json_util.dumps(doc)
self.assertEqual(
doc, json_util.loads(ext_json_str, json_options=options))
# $uuid-encoded fields
doc = {'uuid': Binary(_uuid.bytes, subtype=4)}
ext_json_str = json_util.dumps({'uuid': _uuid})
self.assertEqual(
doc, json_util.loads(ext_json_str, json_options=options))
def test_binary(self):
if PY3:
bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"}
else:
bin_type_dict = {"bin": Binary(b"\x00\x01\x02\x03\x04")}
md5_type_dict = {
"md5": Binary(b' n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac',
MD5_SUBTYPE)}
custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)}
self.round_trip(bin_type_dict)
self.round_trip(md5_type_dict)
self.round_trip(custom_type_dict)
# Binary with subtype 0 is decoded into bytes in Python 3.
bin = json_util.loads(
'{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')['bin']
if PY3:
self.assertEqual(type(bin), bytes)
else:
self.assertEqual(type(bin), Binary)
# PYTHON-443 ensure old type formats are supported
json_bin_dump = json_util.dumps(bin_type_dict)
self.assertTrue('"$type": "00"' in json_bin_dump)
self.assertEqual(bin_type_dict,
json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}'))
json_bin_dump = json_util.dumps(md5_type_dict)
# Check order.
self.assertEqual(
'{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",'
+ ' "$type": "05"}}',
json_bin_dump)
self.assertEqual(md5_type_dict,
json_util.loads('{"md5": {"$type": 5, "$binary":'
' "IG43GK8JL9HRL4DK53HMrA=="}}'))
json_bin_dump = json_util.dumps(custom_type_dict)
self.assertTrue('"$type": "80"' in json_bin_dump)
self.assertEqual(custom_type_dict,
json_util.loads('{"custom": {"$type": 128, "$binary":'
' "aGVsbG8="}}'))
# Handle mongoexport where subtype >= 128
self.assertEqual(128,
json_util.loads('{"custom": {"$type": "ffffff80", "$binary":'
' "aGVsbG8="}}')['custom'].subtype)
self.assertEqual(255,
json_util.loads('{"custom": {"$type": "ffffffff", "$binary":'
' "aGVsbG8="}}')['custom'].subtype)
def test_code(self):
self.round_trip({"code": Code("function x() { return 1; }")})
code = Code("return z", z=2)
res = json_util.dumps(code)
self.assertEqual(code, json_util.loads(res))
# Check order.
self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res)
no_scope = Code('function() {}')
self.assertEqual(
'{"$code": "function() {}"}', json_util.dumps(no_scope))
def test_undefined(self):
jsn = '{"name": {"$undefined": true}}'
self.assertIsNone(json_util.loads(jsn)['name'])
def test_numberlong(self):
jsn = '{"weight": {"$numberLong": "65535"}}'
self.assertEqual(json_util.loads(jsn)['weight'],
Int64(65535))
self.assertEqual(json_util.dumps({"weight": Int64(65535)}),
'{"weight": 65535}')
json_options = json_util.JSONOptions(strict_number_long=True)
self.assertEqual(json_util.dumps({"weight": Int64(65535)},
json_options=json_options),
jsn)
def test_loads_document_class(self):
# document_class dict should always work
self.assertEqual({"foo": "bar"}, json_util.loads(
'{"foo": "bar"}',
json_options=json_util.JSONOptions(document_class=dict)))
self.assertEqual(SON([("foo", "bar"), ("b", 1)]), json_util.loads(
'{"foo": "bar", "b": 1}',
json_options=json_util.JSONOptions(document_class=SON)))
class TestJsonUtilRoundtrip(IntegrationTest):
def test_cursor(self):
db = self.db
db.drop_collection("test")
docs = [
{'foo': [1, 2]},
{'bar': {'hello': 'world'}},
{'code': Code("function x() { return 1; }")},
{'bin': Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)},
{'dbref': {'_ref': DBRef('simple',
ObjectId('509b8db456c02c5ab7e63c34'))}}
]
db.test.insert_many(docs)
reloaded_docs = json_util.loads(json_util.dumps(db.test.find()))
for doc in docs:
self.assertTrue(doc in reloaded_docs)
if __name__ == "__main__":
unittest.main()
| 43.10643
| 79
| 0.581503
|
import datetime
import json
import re
import sys
import uuid
sys.path[0:0] = [""]
from bson import json_util, EPOCH_AWARE, SON
from bson.json_util import (DatetimeRepresentation,
STRICT_JSON_OPTIONS)
from bson.binary import (ALL_UUID_REPRESENTATIONS, Binary, MD5_SUBTYPE,
USER_DEFINED_SUBTYPE, UuidRepresentation, STANDARD)
from bson.code import Code
from bson.dbref import DBRef
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.tz_util import FixedOffset, utc
from test import unittest, IntegrationTest
PY3 = sys.version_info[0] == 3
class TestJsonUtil(unittest.TestCase):
def round_tripped(self, doc, **kwargs):
return json_util.loads(json_util.dumps(doc, **kwargs), **kwargs)
def round_trip(self, doc, **kwargs):
self.assertEqual(doc, self.round_tripped(doc, **kwargs))
def test_basic(self):
self.round_trip({"hello": "world"})
def test_json_options_with_options(self):
opts = json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.NUMBERLONG)
self.assertEqual(
opts.datetime_representation, DatetimeRepresentation.NUMBERLONG)
opts2 = opts.with_options(
datetime_representation=DatetimeRepresentation.ISO8601)
self.assertEqual(
opts2.datetime_representation, DatetimeRepresentation.ISO8601)
opts = json_util.JSONOptions(strict_number_long=True)
self.assertEqual(opts.strict_number_long, True)
opts2 = opts.with_options(strict_number_long=False)
self.assertEqual(opts2.strict_number_long, False)
opts = json_util.CANONICAL_JSON_OPTIONS
self.assertNotEqual(
opts.uuid_representation, UuidRepresentation.JAVA_LEGACY)
opts2 = opts.with_options(
uuid_representation=UuidRepresentation.JAVA_LEGACY)
self.assertEqual(
opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY)
self.assertEqual(opts2.document_class, dict)
opts3 = opts2.with_options(document_class=SON)
self.assertEqual(
opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY)
self.assertEqual(opts3.document_class, SON)
def test_objectid(self):
self.round_trip({"id": ObjectId()})
def test_dbref(self):
self.round_trip({"ref": DBRef("foo", 5)})
self.round_trip({"ref": DBRef("foo", 5, "db")})
self.round_trip({"ref": DBRef("foo", ObjectId())})
self.assertEqual(
'{"$ref": "collection", "$id": 1, "$db": "db"}',
json_util.dumps(DBRef('collection', 1, 'db')))
def test_datetime(self):
self.round_trip({"date": datetime.datetime(2009, 12, 9, 15,
49, 45, 191000, utc)})
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc)
jsn = '{"dt": {"$date": -62135593139000}}'
self.assertEqual(dtm, json_util.loads(jsn)["dt"])
jsn = '{"dt": {"$date": {"$numberLong": "-62135593139000"}}}'
self.assertEqual(dtm, json_util.loads(jsn)["dt"])
pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)}
post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)}
self.assertEqual(
'{"dt": {"$date": -62135593138990}}',
json_util.dumps(pre_epoch))
self.assertEqual(
'{"dt": {"$date": 63075661010}}',
json_util.dumps(post_epoch))
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS))
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS))
number_long_options = json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.NUMBERLONG)
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "63075661010"}}}',
json_util.dumps(post_epoch, json_options=number_long_options))
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=number_long_options))
pre_epoch_naive = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000)}
post_epoch_naive = {
"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)}
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS))
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch_naive,
json_options=STRICT_JSON_OPTIONS))
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}')["dt"])
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=json_util.JSONOptions(tz_aware=True,
tzinfo=utc))["dt"])
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=json_util.JSONOptions(tz_aware=False))["dt"])
self.round_trip(pre_epoch_naive, json_options=json_util.JSONOptions(
tz_aware=False))
pacific = FixedOffset(-8 * 60, 'US/Pacific')
aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000,
pacific)}
self.assertEqual(
'{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}',
json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS))
self.round_trip(aware_datetime, json_options=json_util.JSONOptions(
tz_aware=True, tzinfo=pacific))
self.round_trip(aware_datetime, json_options=json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.ISO8601,
tz_aware=True, tzinfo=pacific))
def test_regex_object_hook(self):
pat = 'a*b'
json_re = '{"$regex": "%s", "$options": "u"}' % pat
loaded = json_util.object_hook(json.loads(json_re))
self.assertTrue(isinstance(loaded, Regex))
self.assertEqual(pat, loaded.pattern)
self.assertEqual(re.U, loaded.flags)
def test_regex(self):
for regex_instance in (
re.compile("a*b", re.IGNORECASE),
Regex("a*b", re.IGNORECASE)):
res = self.round_tripped({"r": regex_instance})["r"]
self.assertEqual("a*b", res.pattern)
res = self.round_tripped({"r": Regex("a*b", re.IGNORECASE)})["r"]
self.assertEqual("a*b", res.pattern)
self.assertEqual(re.IGNORECASE, res.flags)
unicode_options = re.I|re.M|re.S|re.U|re.X
regex = re.compile("a*b", unicode_options)
res = self.round_tripped({"r": regex})["r"]
self.assertEqual(unicode_options, res.flags)
res = json_util.loads('{"r": {"$regex": "a*b"}}')['r']
self.assertEqual(0, res.flags)
self.assertEqual(
Regex('.*', 'ilm'),
json_util.loads(
'{"r": {"$regex": ".*", "$options": "ilm"}}')['r'])
self.assertEqual(
'{"$regex": ".*", "$options": "mx"}',
json_util.dumps(Regex('.*', re.M | re.X)))
self.assertEqual(
'{"$regex": ".*", "$options": "mx"}',
json_util.dumps(re.compile(b'.*', re.M | re.X)))
def test_minkey(self):
self.round_trip({"m": MinKey()})
def test_maxkey(self):
self.round_trip({"m": MaxKey()})
def test_timestamp(self):
dct = {"ts": Timestamp(4, 13)}
res = json_util.dumps(dct, default=json_util.default)
rtdct = json_util.loads(res)
self.assertEqual(dct, rtdct)
self.assertEqual('{"ts": {"$timestamp": {"t": 4, "i": 13}}}', res)
def test_uuid(self):
doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')}
self.round_trip(doc)
self.assertEqual(
'{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}',
json_util.dumps(doc))
self.assertEqual(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}',
json_util.dumps(
doc, json_options=json_util.STRICT_JSON_OPTIONS))
self.assertEqual(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}',
json_util.dumps(
doc, json_options=json_util.JSONOptions(
strict_uuid=True, uuid_representation=STANDARD)))
self.assertEqual(
doc, json_util.loads(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}'))
for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) -
{UuidRepresentation.UNSPECIFIED}):
options = json_util.JSONOptions(
strict_uuid=True, uuid_representation=uuid_representation)
self.round_trip(doc, json_options=options)
self.assertEqual(doc, json_util.loads(
'{"uuid": '
'{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}',
json_options=options))
def test_uuid_uuid_rep_unspecified(self):
_uuid = uuid.uuid4()
options = json_util.JSONOptions(
strict_uuid=True,
uuid_representation=UuidRepresentation.UNSPECIFIED)
doc = {'uuid': _uuid}
with self.assertRaises(ValueError):
json_util.dumps(doc, json_options=options)
doc = {'uuid': Binary(_uuid.bytes, subtype=3)}
ext_json_str = json_util.dumps(doc)
self.assertEqual(
doc, json_util.loads(ext_json_str, json_options=options))
doc = {'uuid': Binary(_uuid.bytes, subtype=4)}
ext_json_str = json_util.dumps(doc)
self.assertEqual(
doc, json_util.loads(ext_json_str, json_options=options))
doc = {'uuid': Binary(_uuid.bytes, subtype=4)}
ext_json_str = json_util.dumps({'uuid': _uuid})
self.assertEqual(
doc, json_util.loads(ext_json_str, json_options=options))
def test_binary(self):
if PY3:
bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"}
else:
bin_type_dict = {"bin": Binary(b"\x00\x01\x02\x03\x04")}
md5_type_dict = {
"md5": Binary(b' n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac',
MD5_SUBTYPE)}
custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)}
self.round_trip(bin_type_dict)
self.round_trip(md5_type_dict)
self.round_trip(custom_type_dict)
bin = json_util.loads(
'{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')['bin']
if PY3:
self.assertEqual(type(bin), bytes)
else:
self.assertEqual(type(bin), Binary)
json_bin_dump = json_util.dumps(bin_type_dict)
self.assertTrue('"$type": "00"' in json_bin_dump)
self.assertEqual(bin_type_dict,
json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}'))
json_bin_dump = json_util.dumps(md5_type_dict)
self.assertEqual(
'{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",'
+ ' "$type": "05"}}',
json_bin_dump)
self.assertEqual(md5_type_dict,
json_util.loads('{"md5": {"$type": 5, "$binary":'
' "IG43GK8JL9HRL4DK53HMrA=="}}'))
json_bin_dump = json_util.dumps(custom_type_dict)
self.assertTrue('"$type": "80"' in json_bin_dump)
self.assertEqual(custom_type_dict,
json_util.loads('{"custom": {"$type": 128, "$binary":'
' "aGVsbG8="}}'))
self.assertEqual(128,
json_util.loads('{"custom": {"$type": "ffffff80", "$binary":'
' "aGVsbG8="}}')['custom'].subtype)
self.assertEqual(255,
json_util.loads('{"custom": {"$type": "ffffffff", "$binary":'
' "aGVsbG8="}}')['custom'].subtype)
def test_code(self):
self.round_trip({"code": Code("function x() { return 1; }")})
code = Code("return z", z=2)
res = json_util.dumps(code)
self.assertEqual(code, json_util.loads(res))
self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res)
no_scope = Code('function() {}')
self.assertEqual(
'{"$code": "function() {}"}', json_util.dumps(no_scope))
def test_undefined(self):
jsn = '{"name": {"$undefined": true}}'
self.assertIsNone(json_util.loads(jsn)['name'])
def test_numberlong(self):
jsn = '{"weight": {"$numberLong": "65535"}}'
self.assertEqual(json_util.loads(jsn)['weight'],
Int64(65535))
self.assertEqual(json_util.dumps({"weight": Int64(65535)}),
'{"weight": 65535}')
json_options = json_util.JSONOptions(strict_number_long=True)
self.assertEqual(json_util.dumps({"weight": Int64(65535)},
json_options=json_options),
jsn)
def test_loads_document_class(self):
self.assertEqual({"foo": "bar"}, json_util.loads(
'{"foo": "bar"}',
json_options=json_util.JSONOptions(document_class=dict)))
self.assertEqual(SON([("foo", "bar"), ("b", 1)]), json_util.loads(
'{"foo": "bar", "b": 1}',
json_options=json_util.JSONOptions(document_class=SON)))
class TestJsonUtilRoundtrip(IntegrationTest):
def test_cursor(self):
db = self.db
db.drop_collection("test")
docs = [
{'foo': [1, 2]},
{'bar': {'hello': 'world'}},
{'code': Code("function x() { return 1; }")},
{'bin': Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)},
{'dbref': {'_ref': DBRef('simple',
ObjectId('509b8db456c02c5ab7e63c34'))}}
]
db.test.insert_many(docs)
reloaded_docs = json_util.loads(json_util.dumps(db.test.find()))
for doc in docs:
self.assertTrue(doc in reloaded_docs)
if __name__ == "__main__":
unittest.main()
| true
| true
|
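A minimal usage sketch of the API those tests exercise: bson.json_util round-trips BSON-specific types through MongoDB Extended JSON.

import datetime
from bson import json_util
from bson.objectid import ObjectId
from bson.tz_util import utc

doc = {"_id": ObjectId(), "when": datetime.datetime(2020, 1, 1, tzinfo=utc)}

text = json_util.dumps(doc)          # Extended JSON string ($oid, $date, ...)
assert json_util.loads(text) == doc  # the same round trip the tests rely on

# ISO-8601 dates and stricter output instead of millisecond integers:
strict = json_util.dumps(doc, json_options=json_util.STRICT_JSON_OPTIONS)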
7906b47936f6cb37291765cf70cb2349d6b0a257
| 219
|
py
|
Python
|
django_c3po/signals.py
|
VorskiImagineering/django-C3PO
|
cd2c9b246fbae3f3d95349019d5109ce31101957
|
[
"MIT"
] | 1
|
2015-10-27T12:49:50.000Z
|
2015-10-27T12:49:50.000Z
|
django_c3po/signals.py
|
VorskiImagineering/django-C3PO
|
cd2c9b246fbae3f3d95349019d5109ce31101957
|
[
"MIT"
] | 3
|
2020-02-11T21:28:25.000Z
|
2021-06-10T17:24:09.000Z
|
django_c3po/signals.py
|
VorskiImagineering/django-C3PO
|
cd2c9b246fbae3f3d95349019d5109ce31101957
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import django.dispatch
# Signal informing the application that .mo files are ready, so the server
# knows when to restart itself.
post_compilemessages = django.dispatch.Signal()
| 24.333333
| 73
| 0.739726
|
import django.dispatch
post_compilemessages = django.dispatch.Signal()
| true
| true
|
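A short sketch of how a project could react to the custom signal defined above; the receiver name and restart logic are illustrative.

from django_c3po.signals import post_compilemessages

def restart_server(sender, **kwargs):
    # Reload / worker-restart logic would go here.
    print("Fresh .mo files compiled by", sender)

post_compilemessages.connect(restart_server)

# Whatever runs compilemessages is expected to fire the signal afterwards:
# post_compilemessages.send(sender=None)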
7906b5583ffd22b17313c6cdb7aeef3b364df4c7
| 21,082
|
py
|
Python
|
src/pnumpy/sort.py
|
Quansight/pnumpy
|
59d430f74168539a0710321c4eecb53d25db4833
|
[
"MIT"
] | 24
|
2021-02-18T12:05:08.000Z
|
2021-12-13T07:46:03.000Z
|
src/pnumpy/sort.py
|
Quansight/numpy-threading-extensions
|
59d430f74168539a0710321c4eecb53d25db4833
|
[
"MIT"
] | 63
|
2020-09-02T19:14:10.000Z
|
2021-01-26T07:04:09.000Z
|
src/pnumpy/sort.py
|
Quansight/numpy-threading-extensions
|
59d430f74168539a0710321c4eecb53d25db4833
|
[
"MIT"
] | 9
|
2020-09-08T15:27:13.000Z
|
2021-01-21T16:50:02.000Z
|
import os
import sys
__all__ = [
'lexsort','sort', 'argsort','argmin', 'argmax', 'searchsorted']
from pnumpy._pnumpy import getitem, lexsort32, lexsort64
import numpy as np
from numpy import asarray, array, asanyarray
from numpy import concatenate
#array_function_dispatch = functools.partial(
# overrides.array_function_dispatch, module='numpy')
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
        if not isinstance(result, np.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# A TypeError occurs if the object does have such a method in its
# class, but its signature is not identical to that of NumPy's. This
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Threading
---------
Up to 8 threads
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'heapsort' 3 O(n*log(n)) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
`heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted.
It, along with 'mergesort' is currently mapped to
`timsort <https://en.wikipedia.org/wiki/Timsort>`_
or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
.. versionchanged:: 1.18.0
NaT now sorts to the end of arrays for consistency with NaN.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
try:
# attempt a parallel sort
sort(a, kind=kind)
return a
except Exception:
pass
else:
a = asanyarray(a).copy(order="K")
# normal numpy code
a.sort(axis=axis, kind=kind, order=order)
return a
def lexsort(*args, **kwargs):
"""
Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
    for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
Threading
---------
Up to 8 threads
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> ind
array([2, 0, 4, 6, 5, 3, 1])
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
"""
try:
return lexsort32(*args, **kwargs)
except Exception:
return np.lexsort(*args, **kwargs)
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
take_along_axis : Apply ``index_array`` from argsort
to an array as if by calling sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def _argmax_dispatcher(a, axis=None, out=None):
return (a, out)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmax to an array as if by calling max.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indexes of the maximal elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmax(x, axis=-1)
>>> # Same as np.max(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[4],
[3]])
>>> # Same as np.max(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([4, 3])
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def _argmin_dispatcher(a, axis=None, out=None):
return (a, out)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmin to an array as if by calling min.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmin(x, axis=-1)
>>> # Same as np.min(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[2],
[0]])
    >>> # Same as np.min(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([2, 0])
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
return (a, v, sorter)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
| 33.043887
| 94
| 0.591737
|
import os
import sys
__all__ = [
'lexsort','sort', 'argsort','argmin', 'argmax', 'searchsorted']
from pnumpy._pnumpy import getitem, lexsort32, lexsort64
import numpy as np
from numpy import asarray, array, asanyarray
from numpy import concatenate
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
        if not isinstance(result, np.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
def sort(a, axis=-1, kind=None, order=None):
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
try:
# attempt a parallel sort
sort(a, kind=kind)
return a
except Exception:
pass
else:
a = asanyarray(a).copy(order="K")
# normal numpy code
a.sort(axis=axis, kind=kind, order=order)
return a
def lexsort(*args, **kwargs):
try:
return lexsort32(*args, **kwargs)
except Exception:
return np.lexsort(*args, **kwargs)
def argsort(a, axis=-1, kind=None, order=None):
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def _argmax_dispatcher(a, axis=None, out=None):
return (a, out)
def argmax(a, axis=None, out=None):
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def _argmin_dispatcher(a, axis=None, out=None):
return (a, out)
def argmin(a, axis=None, out=None):
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
return (a, v, sorter)
def searchsorted(a, v, side='left', sorter=None):
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
| true
| true
|
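A small usage sketch, assuming the package is installed and exposes these wrappers as pnumpy.sort; each call falls back to plain NumPy behaviour when the threaded path is unavailable.

import numpy as np
from pnumpy.sort import sort, lexsort, argsort

a = np.random.rand(100_000)
s = sort(a)                                          # sorted copy, threaded where available
idx = lexsort((np.random.rand(10), np.arange(10)))   # keys: the last one is primary
order = argsort(a)                                   # thin wrapper over ndarray.argsort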
7906b6a1d04a573799d11505d8ba6ad46b7056ce
| 26,863
|
py
|
Python
|
mbreplacer.py
|
ackhoury/mbreplacer
|
aea17fb0cc6e8c17c0ffb81560e9ecab0f8dc0ee
|
[
"MIT"
] | null | null | null |
mbreplacer.py
|
ackhoury/mbreplacer
|
aea17fb0cc6e8c17c0ffb81560e9ecab0f8dc0ee
|
[
"MIT"
] | null | null | null |
mbreplacer.py
|
ackhoury/mbreplacer
|
aea17fb0cc6e8c17c0ffb81560e9ecab0f8dc0ee
|
[
"MIT"
] | null | null | null |
import os
import shutil
import subprocess
import sys
from enum import Enum
from PyQt5 import QtCore
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMainWindow, QApplication, QListWidgetItem, QFileDialog, QComboBox, QMessageBox, \
QAbstractItemView, QDialogButtonBox, QLabel, QWidget, QPushButton, QListWidget, QFrame, QProgressBar, QStatusBar
class Status(Enum):
OK = 0
WARN = 1
FAIL = 2
def get_qt_data_keys(num_keys):
    assert num_keys <= 255, "too many keys queried"
possible_keys = range(256)
used_keys = list(map(int, [QtCore.Qt.CheckStateRole,
QtCore.Qt.DecorationRole,
QtCore.Qt.AccessibleDescriptionRole,
QtCore.Qt.AccessibleTextRole,
QtCore.Qt.BackgroundColorRole,
QtCore.Qt.BackgroundRole,
QtCore.Qt.DisplayRole,
QtCore.Qt.EditRole,
QtCore.Qt.FontRole,
QtCore.Qt.ForegroundRole,
QtCore.Qt.InitialSortOrderRole,
QtCore.Qt.SizeHintRole,
QtCore.Qt.StatusTipRole,
QtCore.Qt.TextAlignmentRole,
QtCore.Qt.TextColorRole,
QtCore.Qt.ToolTipRole,
QtCore.Qt.UserRole,
QtCore.Qt.WhatsThisRole]))
c, keys = 0, []
for key in possible_keys:
if c < num_keys and key not in used_keys:
keys.append(key)
c += 1
return keys
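# Illustrative sketch of how the keys returned above can be used: they act as
# item-data roles that do not collide with Qt's predefined roles, so arbitrary
# payloads can be attached to list items (MBReplacer does exactly this further
# down).  The values here are hypothetical; calling the function may require a
# QApplication on some platforms.
def _demo_custom_data_roles():
    files_key, stage_key = get_qt_data_keys(2)
    item = QListWidgetItem("some stage")
    item.setData(files_key, {"obj": "/path/stage.obj"})
    item.setData(stage_key, "01")
    return item.data(files_key), item.data(stage_key)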
class ChooseStagePopupUI:
def __init__(self):
self._stage_select_combobox = None # type: QComboBox
self._dialog_button_box = None # type: QDialogButtonBox
self._choose_stage_label = None # type: QLabel
self._stage_base_names = []
def _setup_ui(self, choose_stage_popup):
choose_stage_popup.setObjectName("choose_stage_popupI")
choose_stage_popup.resize(493, 108)
self._stage_select_combobox = QComboBox(choose_stage_popup)
self._stage_select_combobox.setGeometry(QtCore.QRect(10, 30, 471, 27))
self._stage_select_combobox.setObjectName("stage_select_combobox")
self._load_stages()
self._dialog_button_box = QDialogButtonBox(choose_stage_popup)
self._dialog_button_box.setGeometry(QtCore.QRect(150, 70, 176, 27))
self._dialog_button_box.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
self._dialog_button_box.setObjectName("dialog_button_box")
self._dialog_button_box.rejected.connect(self.close)
self._choose_stage_label = QLabel(choose_stage_popup)
self._choose_stage_label.setGeometry(QtCore.QRect(10, 10, 461, 17))
self._choose_stage_label.setObjectName("choose_stage_label")
self._choose_stage_label.setText("Choose monkeyball stage to replace (Challenge Mode)")
choose_stage_popup.setWindowTitle("Choose Stage to Replace")
def _load_stages(self):
with open(os.path.join(get_mbreplacer_dir(), 'resources', 'challenge_stages_list.txt'), 'r') as f:
for line in f:
clean_line = line.strip()
self._stage_select_combobox.addItem(clean_line)
self._stage_base_names.append(clean_line)
class ChooseStagePopup(QMainWindow, ChooseStagePopupUI):
def __init__(self):
QMainWindow.__init__(self)
ChooseStagePopupUI.__init__(self)
self._setup_ui(self)
def connect(self, callback):
self._dialog_button_box.accepted.connect(callback)
def get_selected_stage_index(self):
return int(self._stage_select_combobox.currentIndex())
def set_associated_stage(self, index, associated_stage):
self._stage_select_combobox.setItemText(index, self._stage_base_names[index] + " [{}]".format(associated_stage))
def remove_associated_stage(self, stage_index):
self._stage_select_combobox.setItemText(stage_index, self._stage_base_names[stage_index])
def get_stage_name(self, index):
return self._stage_base_names[index].split(":")[1][1:]
def get_stage_id(self, index):
return self._stage_base_names[index].split(":")[0]
def increment_stage_index(self):
current_idx = self._stage_select_combobox.currentIndex()
if current_idx == self._stage_select_combobox.count() - 1:
current_idx = 0
else:
current_idx += 1
self._stage_select_combobox.setCurrentIndex(current_idx)
class MBReplacerUI:
def __init__(self):
self._central_widget = None # type: QWidget
self._import_multiple_stages_btn = None # type: QPushButton
self._import_root_btn = None # type: QPushButton
self._imported_stages_list = None # type: QListWidget
self._imported_stages_label = None # type: QLabel
self._replace_queue_list = None # type: QListWidget
self._stages_to_be_replaced_label = None # type: QLabel
self._replace_btn = None # type: QPushButton
self._add_to_replace_btn = None # type: QPushButton
self._remove_from_replace_btn = None # type: QPushButton
self._progress_bar = None # type: QProgressBar
self._line = None # type: QFrame
self._add_single_stage_btn = None # type: QPushButton
self._remove_single_stage_btn = None # type: QPushButton
self._status_bar = None # type: QStatusBar
def _setup_ui(self, mbreplacer):
mbreplacer.setObjectName("mbreplacer")
mbreplacer.resize(961, 545)
self._central_widget = QWidget(mbreplacer)
self._central_widget.setObjectName("centralWidget")
self._import_multiple_stages_btn = QPushButton(self._central_widget)
self._import_multiple_stages_btn.setGeometry(QtCore.QRect(150, 490, 151, 27))
self._import_multiple_stages_btn.setObjectName("import_multiple_stages_btn")
self._import_multiple_stages_btn.setText("import multiple from folder")
self._import_root_btn = QPushButton(self._central_widget)
self._import_root_btn.setGeometry(QtCore.QRect(10, 10, 161, 31))
self._import_root_btn.setObjectName("import_root_btn")
self._import_root_btn.setText("import root folder")
self._imported_stages_list = QListWidget(self._central_widget)
self._imported_stages_list.setGeometry(QtCore.QRect(10, 80, 431, 401))
self._imported_stages_list.setObjectName("imported_stages_list")
self._imported_stages_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
self._imported_stages_label = QLabel(self._central_widget)
self._imported_stages_label.setGeometry(QtCore.QRect(170, 50, 111, 31))
self._imported_stages_label.setObjectName("imported_stages_label")
self._imported_stages_label.setText("imported stages")
self._replace_queue_list = QListWidget(self._central_widget)
self._replace_queue_list.setGeometry(QtCore.QRect(520, 80, 431, 401))
self._replace_queue_list.setObjectName("replace_queue_list")
self._replace_queue_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
self._stages_to_be_replaced_label = QLabel(self._central_widget)
self._stages_to_be_replaced_label.setGeometry(QtCore.QRect(660, 50, 151, 31))
self._stages_to_be_replaced_label.setObjectName("stages_to_be_replaced_label")
self._stages_to_be_replaced_label.setText("stages to be replaced")
self._replace_btn = QPushButton(self._central_widget)
self._replace_btn.setGeometry(QtCore.QRect(670, 490, 131, 31))
self._replace_btn.setObjectName("replace_btn")
self._replace_btn.setText("replace!")
self._add_to_replace_btn = QPushButton(self._central_widget)
self._add_to_replace_btn.setGeometry(QtCore.QRect(460, 230, 41, 27))
self._add_to_replace_btn.setObjectName("add_to_replace_btn")
self._add_to_replace_btn.setText("->")
self._remove_from_replace_btn = QPushButton(self._central_widget)
self._remove_from_replace_btn.setGeometry(QtCore.QRect(460, 280, 41, 27))
self._remove_from_replace_btn.setObjectName("remove_from_replace_btn")
self._remove_from_replace_btn.setText("<-")
self._line = QFrame(self._central_widget)
self._line.setGeometry(QtCore.QRect(0, 40, 961, 20))
self._line.setFrameShape(QFrame.HLine)
self._line.setFrameShadow(QFrame.Sunken)
self._line.setObjectName("line")
self._add_single_stage_btn = QPushButton(self._central_widget)
self._add_single_stage_btn.setGeometry(QtCore.QRect(310, 490, 31, 27))
self._add_single_stage_btn.setObjectName("add_single_stage_btn")
self._add_single_stage_btn.setText("+")
self._remove_single_stage_btn = QPushButton(self._central_widget)
self._remove_single_stage_btn.setGeometry(QtCore.QRect(110, 490, 31, 27))
self._remove_single_stage_btn.setObjectName("remove_single_stage_btn")
self._remove_single_stage_btn.setText("-")
self._root_folder_label = QLabel(self._central_widget)
self._root_folder_label.setGeometry(QtCore.QRect(220, 16, 341, 21))
self._root_folder_label.setObjectName("root_folder_label")
mbreplacer.setCentralWidget(self._central_widget)
self._status_bar_label = QLabel(self._central_widget)
self._status_bar_label.setGeometry(QtCore.QRect(5, 525, 961, 24))
self._status_bar_label.setObjectName("status_bar_label")
mbreplacer.setWindowTitle("mbreplacer: stage replacer")
class MBReplacer(QMainWindow, MBReplacerUI):
def __init__(self):
QMainWindow.__init__(self)
MBReplacerUI.__init__(self)
self._setup_ui(self)
self._import_multiple_stages_btn.clicked.connect(self._import_multiple_stages_btn_clicked)
self._import_root_btn.clicked.connect(self._import_root_btn_clicked)
self._add_to_replace_btn.clicked.connect(self._add_to_replace_btn_clicked)
self._remove_from_replace_btn.clicked.connect(self._remove_from_replace_btn_clicked)
self._replace_btn.clicked.connect(self._replace_btn_clicked)
self._add_single_stage_btn.clicked.connect(self._add_single_stage_btn_clicked)
self._remove_single_stage_btn.clicked.connect(self._remove_single_stage_btn_clicked)
self._root_folder_path = None
self._imported_stages = []
self._stages_to_be_replaced = []
self._choose_stage_popup = ChooseStagePopup()
self._input_filenames_key, self._output_stage_id_key, self._is_valid_input_key = get_qt_data_keys(3)
# the tuple allows for replacement files for the given element. obj and mtl are required and have no replacement
# but for config we can take xml lz or lz.raw. let the order of the tuple denote priority (we want xml over all)
self._required_extensions = [("obj",), ("mtl",), ("xml", "lz", "lz.raw")]
self._required_tools = ['GxModelViewer.exe', 'ws2lzfrontend.exe', 'SMB_LZ_Tool.exe']
self._tool_filepaths = self._find_required_tools()
self._imported_obj_filepaths = []
self._replace_queue = []
self._temp_dir = os.path.join(get_mbreplacer_dir(), 'temp')
def _find_required_tools(self):
tool_filepaths = {}
[tool_filepaths.update({f: os.path.join(dp, f)})
for dp, dn, filenames in os.walk(get_mbreplacer_dir())
for f in filenames if f in self._required_tools]
return tool_filepaths
# button callbacks:
def _add_single_stage(self, obj_filepath):
import_stage_directory = os.path.dirname(obj_filepath)
import_stage_base_name = str(os.path.basename(obj_filepath).split(".")[0])
all_filenames = os.listdir(import_stage_directory)
collected_filepaths = {}
item_string = import_stage_base_name + " | has: ["
for required_extension in self._required_extensions:
for extension in required_extension:
filename = import_stage_base_name + "." + extension
if filename in all_filenames:
collected_filepaths[os.path.splitext(filename)[1][1:]] = os.path.join(import_stage_directory, filename)
item_string += extension + ", "
break
item_string = item_string[:-2] + "]"
all_textures_present = False
if 'mtl' in collected_filepaths:
with open(collected_filepaths['mtl'], 'r') as f:
required_textures = []
for line in f:
split_line = line.strip().split()
if split_line and split_line[0] == 'map_Kd':
if os.path.isabs(split_line[1]):
required_textures.append(split_line[1])
else:
required_textures.append(os.path.join(import_stage_directory, split_line[1]))
all_textures_present = all([os.path.exists(texture) for texture in required_textures])
item_string += " | textures: " + ("yes" if all_textures_present else "no")
with open(obj_filepath, 'r') as f:
obj_lines = f.readlines()
num_vertices = len([line for line in obj_lines if line.startswith('v ')])
num_faces = len([line for line in obj_lines if line.startswith('f ')])
item_string += " | v:" + str(num_vertices) + " f: " + str(num_faces)
all_inputs_met = len(collected_filepaths.keys()) == len(self._required_extensions) and all_textures_present
item = QListWidgetItem()
item.setData(self._input_filenames_key, collected_filepaths)
item.setData(self._is_valid_input_key, all_inputs_met)
item.setText(item_string)
item.setIcon(QIcon("resources/green_checkmark.png") if all_inputs_met else QIcon("resources/red_xmark.png"))
self._imported_stages_list.addItem(item)
return Status.OK
def _add_single_stage_btn_clicked(self):
file_dialog = QFileDialog()
obj_filepath = QFileDialog.getOpenFileName(file_dialog,
"import stage .obj file",
get_mbreplacer_dir(),
"*.obj")[0]
if obj_filepath in self._imported_obj_filepaths:
duplicate_idx = self._imported_obj_filepaths.index(obj_filepath)
duplicate_item = self._imported_stages_list.item(duplicate_idx)
self._imported_stages_list.takeItem(self._imported_stages_list.row(duplicate_item))
del self._imported_obj_filepaths[duplicate_idx]
if obj_filepath:
self._add_single_stage(obj_filepath)
self._imported_obj_filepaths.append(obj_filepath)
self._imported_stages_list.sortItems()
return Status.OK
def _remove_single_stage_btn_clicked(self):
selected_items = self._imported_stages_list.selectedItems()
for selected_item in selected_items:
self._imported_stages_list.takeItem(self._imported_stages_list.row(selected_item))
return Status.OK
def _import_multiple_stages_btn_clicked(self):
file_dialog = QFileDialog()
file_dialog.setParent(self.sender())
stages_folder_path = QFileDialog.getExistingDirectory(file_dialog,
"import folder with multiple objs/mtls/configs",
get_mbreplacer_dir())
stages_folder_path = QtCore.QDir.toNativeSeparators(stages_folder_path)
obj_filepaths = [os.path.join(dp, f)
for dp, dn, filenames in os.walk(stages_folder_path)
for f in filenames if os.path.splitext(f)[1] == '.obj']
for obj_filepath in obj_filepaths:
if obj_filepath in self._imported_obj_filepaths:
duplicate_idx = self._imported_obj_filepaths.index(obj_filepath)
duplicate_item = self._imported_stages_list.item(duplicate_idx)
self._imported_stages_list.takeItem(self._imported_stages_list.row(duplicate_item))
del self._imported_obj_filepaths[duplicate_idx]
self._add_single_stage(obj_filepath)
self._imported_obj_filepaths.append(obj_filepath)
if obj_filepaths:
self._imported_stages_list.sortItems()
return Status.OK
def _import_root_btn_clicked(self):
file_dialog = QFileDialog()
file_dialog.setParent(self.sender())
self._root_folder_path = QFileDialog.getExistingDirectory(file_dialog,
"import root folder extracted from .iso",
get_mbreplacer_dir())
self._root_folder_path = QtCore.QDir.toNativeSeparators(self._root_folder_path)
if not os.path.exists(os.path.join(self._root_folder_path, 'stage')):
self._root_folder_path = None
self._give_error_message("root folder seems to be invalid, no 'stage' folder found")
return
self._root_folder_label.setText(self._root_folder_path)
def _add_to_replace_btn_clicked(self):
selected_items = self._imported_stages_list.selectedItems()
if not selected_items:
return Status.OK
elif not all([selected_item.data(self._is_valid_input_key) for selected_item in selected_items]):
required = [', or '.join(required_extension) for required_extension in self._required_extensions]
self._give_error_message("Could not find all required files for one of the selected stages!\n"
"Please sure the required files are in the same directory as the .obj,\n"
"then reimport the stage!\n\n"
"Required Extensions: " + str(required) + "\n\n"
"Also requires that all linked textures are found. "
"(open the mtl file as txt to see the texture paths)\n\n"
)
return Status.WARN
else:
self._choose_stage_popup.setWindowModality(QtCore.Qt.WindowModal)
self._choose_stage_popup.connect(self._on_choose_stage)
self._choose_stage_popup.show()
return Status.OK
    def _remove_from_replace_btn_clicked(self):
        selected_items = self._replace_queue_list.selectedItems()
        for selected_item in selected_items:
            # the list widget rows and self._replace_queue are kept parallel,
            # so index by the item's row and drop the queue entry as well
            row = self._replace_queue_list.row(selected_item)
            self._replace_queue_list.takeItem(row)
            self._choose_stage_popup.remove_associated_stage(self._replace_queue[row][1])
            del self._replace_queue[row]
        return Status.OK
def _replace_stage_in_root(self, obj_filepath, config_filepath, stage_id):
config_ext = os.path.splitext(config_filepath)[1]
base_filepath = os.path.splitext(obj_filepath)[0]
gma_filepath = base_filepath + ".gma"
tpl_filepath = base_filepath + ".tpl"
lz_raw_filepath = base_filepath + ".lz.raw"
lz_filepath = os.path.splitext(lz_raw_filepath)[0]
needs_lz_raw_creation = config_ext == ".xml"
needs_lz_compression = config_ext == ".xml" or config_ext == ".raw"
if not needs_lz_compression and not needs_lz_raw_creation and not os.path.exists(lz_filepath):
self._give_error_message(".lz file promised not found")
return Status.WARN
tool_id = 'GxModelViewer.exe'
if tool_id not in self._tool_filepaths:
self._give_error_message("Cannot find tool: " + tool_id +
"\n\nPlease make sure the tool with this exact name "
"is somewhere in the mbreplacer directory")
return Status.WARN
# make gma and tpl in another thread while we do other things
gx_process = subprocess.Popen([self._tool_filepaths['GxModelViewer.exe'], obj_filepath])
# make .lz.raw
if needs_lz_raw_creation:
tool_id = 'ws2lzfrontend.exe'
if tool_id not in self._tool_filepaths:
self._give_error_message("Cannot find tool: " + tool_id +
"\n\nPlease make sure the tool with this exact name "
"is somewhere in the mbreplacer directory")
return Status.WARN
subprocess.call([self._tool_filepaths[tool_id], '-c', config_filepath, '-o', lz_raw_filepath, "-g", '2'])
if needs_lz_compression and not os.path.exists(lz_raw_filepath):
self._give_error_message("Failure to create .lz.raw file, ensure the config/obj/mtl files are valid, "
"as well as the ws2lzfrontend.exe tool")
return Status.WARN
# make .lz
if needs_lz_compression:
tool_id = 'SMB_LZ_Tool.exe'
if tool_id not in self._tool_filepaths:
self._give_error_message("Cannot find tool: " + tool_id +
"\n\nPlease make sure the tool with this exact name "
"is somewhere in the mbreplacer directory")
return
subprocess.call([self._tool_filepaths[tool_id], lz_raw_filepath])
if needs_lz_compression and not os.path.exists(lz_raw_filepath + '.lz'):
self._give_error_message("Failure to create .lz.raw file, ensure the config/obj/mtl files are valid, "
"as well as the ws2lzfrontend.exe tool")
return Status.WARN
if needs_lz_compression:
if os.path.exists(lz_filepath):
os.remove(lz_filepath)
os.rename(lz_raw_filepath + '.lz', lz_filepath)
os.remove(lz_raw_filepath)
# wait for the gx process to finish
gx_process.wait()
if not os.path.exists(gma_filepath) or not os.path.exists(tpl_filepath):
self._give_error_message("Failure to create gma and tpl files, ensure these files are correct, "
"as well as the GxModelViewer.exe (No GUI) tool")
return Status.WARN
stage_gma_filepath = os.path.join(self._root_folder_path, 'stage', 'st' + stage_id + '.gma')
stage_tpl_filepath = os.path.join(self._root_folder_path, 'stage', 'st' + stage_id + '.tpl')
stage_lz_filepath = os.path.join(self._root_folder_path, 'stage', 'STAGE' + stage_id + '.lz')
shutil.copy(gma_filepath, stage_gma_filepath)
shutil.copy(tpl_filepath, stage_tpl_filepath)
shutil.copy(lz_filepath, stage_lz_filepath)
return Status.OK
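    # Summary of the conversion pipeline driven by _replace_stage_in_root
    # (drawn from the code above, for orientation only):
    #   .obj/.mtl (+textures) --GxModelViewer.exe--> <name>.gma + <name>.tpl
    #   .xml config           --ws2lzfrontend.exe--> <name>.lz.raw (skipped if a .lz/.lz.raw was supplied)
    #   <name>.lz.raw         --SMB_LZ_Tool.exe----> <name>.lz
    # The results are then copied into the extracted ISO as
    # stage/st<ID>.gma, stage/st<ID>.tpl and stage/STAGE<ID>.lz.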
def _replace_btn_clicked(self):
if self._root_folder_path is None:
self._give_error_message("Please import your monkeyball root folder created by gamecube rebuilder")
return
self._tool_filepaths = self._find_required_tools()
for i in range(self._replace_queue_list.count()):
item = self._replace_queue_list.item(i)
input_filepaths = item.data(self._input_filenames_key)
obj_filepath = input_filepaths['obj']
config_filepath = [value for key, value in input_filepaths.items() if key != 'obj' and key != 'mtl'][0]
stage_id = item.data(self._output_stage_id_key)
status = self._replace_stage_in_root(obj_filepath, config_filepath, stage_id)
if status in (Status.WARN, Status.FAIL):
item.setIcon(QIcon("resources/red_xmark.png"))
return status
item.setIcon(QIcon("resources/green_checkmark.png"))
self._status_bar_label.setText("written " + os.path.basename(os.path.splitext(obj_filepath)[0]) + " to root")
return Status.OK
def _on_choose_stage(self):
if not self._choose_stage_popup.isActiveWindow():
return Status.OK
self._choose_stage_popup.close()
selected_items = self._imported_stages_list.selectedItems()
for selected_item in selected_items:
stage_index = self._choose_stage_popup.get_selected_stage_index()
replacement_stage_name = selected_item.text().split("|")[0][:-1]
# if theres a conflict or duplicate, remove it
if self._replace_queue:
stage_indices = list(zip(*self._replace_queue))[1]
# conflict
if stage_index in stage_indices:
conflict_index = stage_indices.index(stage_index)
conflict_item = self._replace_queue_list.item(conflict_index)
self._replace_queue_list.takeItem(self._replace_queue_list.row(conflict_item))
del self._replace_queue[conflict_index]
# duplicate
if (replacement_stage_name, stage_index) in self._replace_queue:
return Status.OK
self._choose_stage_popup.set_associated_stage(stage_index, replacement_stage_name)
item = QListWidgetItem()
item.setData(self._output_stage_id_key, self._choose_stage_popup.get_stage_id(stage_index))
item.setData(self._input_filenames_key, selected_item.data(self._input_filenames_key))
item_text = replacement_stage_name + " -> " + self._choose_stage_popup.get_stage_name(stage_index)
item.setText(item_text)
item.setIcon(QIcon("resources/gray_dot.png"))
self._replace_queue_list.addItem(item)
self._replace_queue.append((replacement_stage_name, stage_index))
self._choose_stage_popup.increment_stage_index()
return Status.OK
def _give_error_message(self, message, raise_exception=False):
error_message = QMessageBox()
error_message.setParent(self.sender())
error_message.setWindowTitle("ERROR")
error_message.setText(message)
error_message.setWindowModality(QtCore.Qt.WindowModal)
error_message.exec_()
if raise_exception:
raise Exception(message)
def get_mbreplacer_dir():
"""
Get the mbreplacer dir
:return str: mbreplacer root dir
"""
return os.getcwd()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MBReplacer()
window.show()
sys.exit(app.exec_())
| 46.881326
| 123
| 0.654395
|
import os
import shutil
import subprocess
import sys
from enum import Enum
from PyQt5 import QtCore
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMainWindow, QApplication, QListWidgetItem, QFileDialog, QComboBox, QMessageBox, \
QAbstractItemView, QDialogButtonBox, QLabel, QWidget, QPushButton, QListWidget, QFrame, QProgressBar, QStatusBar
class Status(Enum):
OK = 0
WARN = 1
FAIL = 2
def get_qt_data_keys(num_keys):
    assert num_keys <= 255, "too many keys queried"
possible_keys = range(256)
used_keys = list(map(int, [QtCore.Qt.CheckStateRole,
QtCore.Qt.DecorationRole,
QtCore.Qt.AccessibleDescriptionRole,
QtCore.Qt.AccessibleTextRole,
QtCore.Qt.BackgroundColorRole,
QtCore.Qt.BackgroundRole,
QtCore.Qt.DisplayRole,
QtCore.Qt.EditRole,
QtCore.Qt.FontRole,
QtCore.Qt.ForegroundRole,
QtCore.Qt.InitialSortOrderRole,
QtCore.Qt.SizeHintRole,
QtCore.Qt.StatusTipRole,
QtCore.Qt.TextAlignmentRole,
QtCore.Qt.TextColorRole,
QtCore.Qt.ToolTipRole,
QtCore.Qt.UserRole,
QtCore.Qt.WhatsThisRole]))
c, keys = 0, []
for key in possible_keys:
if c < num_keys and key not in used_keys:
keys.append(key)
c += 1
return keys
class ChooseStagePopupUI:
def __init__(self):
self._stage_select_combobox = None
self._dialog_button_box = None
self._choose_stage_label = None
self._stage_base_names = []
def _setup_ui(self, choose_stage_popup):
choose_stage_popup.setObjectName("choose_stage_popupI")
choose_stage_popup.resize(493, 108)
self._stage_select_combobox = QComboBox(choose_stage_popup)
self._stage_select_combobox.setGeometry(QtCore.QRect(10, 30, 471, 27))
self._stage_select_combobox.setObjectName("stage_select_combobox")
self._load_stages()
self._dialog_button_box = QDialogButtonBox(choose_stage_popup)
self._dialog_button_box.setGeometry(QtCore.QRect(150, 70, 176, 27))
self._dialog_button_box.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
self._dialog_button_box.setObjectName("dialog_button_box")
self._dialog_button_box.rejected.connect(self.close)
self._choose_stage_label = QLabel(choose_stage_popup)
self._choose_stage_label.setGeometry(QtCore.QRect(10, 10, 461, 17))
self._choose_stage_label.setObjectName("choose_stage_label")
self._choose_stage_label.setText("Choose monkeyball stage to replace (Challenge Mode)")
choose_stage_popup.setWindowTitle("Choose Stage to Replace")
def _load_stages(self):
with open(os.path.join(get_mbreplacer_dir(), 'resources', 'challenge_stages_list.txt'), 'r') as f:
for line in f:
clean_line = line.strip()
self._stage_select_combobox.addItem(clean_line)
self._stage_base_names.append(clean_line)
class ChooseStagePopup(QMainWindow, ChooseStagePopupUI):
def __init__(self):
QMainWindow.__init__(self)
ChooseStagePopupUI.__init__(self)
self._setup_ui(self)
def connect(self, callback):
self._dialog_button_box.accepted.connect(callback)
def get_selected_stage_index(self):
return int(self._stage_select_combobox.currentIndex())
def set_associated_stage(self, index, associated_stage):
self._stage_select_combobox.setItemText(index, self._stage_base_names[index] + " [{}]".format(associated_stage))
def remove_associated_stage(self, stage_index):
self._stage_select_combobox.setItemText(stage_index, self._stage_base_names[stage_index])
def get_stage_name(self, index):
return self._stage_base_names[index].split(":")[1][1:]
def get_stage_id(self, index):
return self._stage_base_names[index].split(":")[0]
def increment_stage_index(self):
current_idx = self._stage_select_combobox.currentIndex()
if current_idx == self._stage_select_combobox.count() - 1:
current_idx = 0
else:
current_idx += 1
self._stage_select_combobox.setCurrentIndex(current_idx)
class MBReplacerUI:
def __init__(self):
self._central_widget = None
self._import_multiple_stages_btn = None
self._import_root_btn = None
self._imported_stages_list = None
self._imported_stages_label = None
self._replace_queue_list = None
self._stages_to_be_replaced_label = None
self._replace_btn = None
self._add_to_replace_btn = None
self._remove_from_replace_btn = None
self._progress_bar = None
self._line = None
self._add_single_stage_btn = None
self._remove_single_stage_btn = None
self._status_bar = None
def _setup_ui(self, mbreplacer):
mbreplacer.setObjectName("mbreplacer")
mbreplacer.resize(961, 545)
self._central_widget = QWidget(mbreplacer)
self._central_widget.setObjectName("centralWidget")
self._import_multiple_stages_btn = QPushButton(self._central_widget)
self._import_multiple_stages_btn.setGeometry(QtCore.QRect(150, 490, 151, 27))
self._import_multiple_stages_btn.setObjectName("import_multiple_stages_btn")
self._import_multiple_stages_btn.setText("import multiple from folder")
self._import_root_btn = QPushButton(self._central_widget)
self._import_root_btn.setGeometry(QtCore.QRect(10, 10, 161, 31))
self._import_root_btn.setObjectName("import_root_btn")
self._import_root_btn.setText("import root folder")
self._imported_stages_list = QListWidget(self._central_widget)
self._imported_stages_list.setGeometry(QtCore.QRect(10, 80, 431, 401))
self._imported_stages_list.setObjectName("imported_stages_list")
self._imported_stages_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
self._imported_stages_label = QLabel(self._central_widget)
self._imported_stages_label.setGeometry(QtCore.QRect(170, 50, 111, 31))
self._imported_stages_label.setObjectName("imported_stages_label")
self._imported_stages_label.setText("imported stages")
self._replace_queue_list = QListWidget(self._central_widget)
self._replace_queue_list.setGeometry(QtCore.QRect(520, 80, 431, 401))
self._replace_queue_list.setObjectName("replace_queue_list")
self._replace_queue_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
self._stages_to_be_replaced_label = QLabel(self._central_widget)
self._stages_to_be_replaced_label.setGeometry(QtCore.QRect(660, 50, 151, 31))
self._stages_to_be_replaced_label.setObjectName("stages_to_be_replaced_label")
self._stages_to_be_replaced_label.setText("stages to be replaced")
self._replace_btn = QPushButton(self._central_widget)
self._replace_btn.setGeometry(QtCore.QRect(670, 490, 131, 31))
self._replace_btn.setObjectName("replace_btn")
self._replace_btn.setText("replace!")
self._add_to_replace_btn = QPushButton(self._central_widget)
self._add_to_replace_btn.setGeometry(QtCore.QRect(460, 230, 41, 27))
self._add_to_replace_btn.setObjectName("add_to_replace_btn")
self._add_to_replace_btn.setText("->")
self._remove_from_replace_btn = QPushButton(self._central_widget)
self._remove_from_replace_btn.setGeometry(QtCore.QRect(460, 280, 41, 27))
self._remove_from_replace_btn.setObjectName("remove_from_replace_btn")
self._remove_from_replace_btn.setText("<-")
self._line = QFrame(self._central_widget)
self._line.setGeometry(QtCore.QRect(0, 40, 961, 20))
self._line.setFrameShape(QFrame.HLine)
self._line.setFrameShadow(QFrame.Sunken)
self._line.setObjectName("line")
self._add_single_stage_btn = QPushButton(self._central_widget)
self._add_single_stage_btn.setGeometry(QtCore.QRect(310, 490, 31, 27))
self._add_single_stage_btn.setObjectName("add_single_stage_btn")
self._add_single_stage_btn.setText("+")
self._remove_single_stage_btn = QPushButton(self._central_widget)
self._remove_single_stage_btn.setGeometry(QtCore.QRect(110, 490, 31, 27))
self._remove_single_stage_btn.setObjectName("remove_single_stage_btn")
self._remove_single_stage_btn.setText("-")
self._root_folder_label = QLabel(self._central_widget)
self._root_folder_label.setGeometry(QtCore.QRect(220, 16, 341, 21))
self._root_folder_label.setObjectName("root_folder_label")
mbreplacer.setCentralWidget(self._central_widget)
self._status_bar_label = QLabel(self._central_widget)
self._status_bar_label.setGeometry(QtCore.QRect(5, 525, 961, 24))
self._status_bar_label.setObjectName("status_bar_label")
mbreplacer.setWindowTitle("mbreplacer: stage replacer")
class MBReplacer(QMainWindow, MBReplacerUI):
def __init__(self):
QMainWindow.__init__(self)
MBReplacerUI.__init__(self)
self._setup_ui(self)
self._import_multiple_stages_btn.clicked.connect(self._import_multiple_stages_btn_clicked)
self._import_root_btn.clicked.connect(self._import_root_btn_clicked)
self._add_to_replace_btn.clicked.connect(self._add_to_replace_btn_clicked)
self._remove_from_replace_btn.clicked.connect(self._remove_from_replace_btn_clicked)
self._replace_btn.clicked.connect(self._replace_btn_clicked)
self._add_single_stage_btn.clicked.connect(self._add_single_stage_btn_clicked)
self._remove_single_stage_btn.clicked.connect(self._remove_single_stage_btn_clicked)
self._root_folder_path = None
self._imported_stages = []
self._stages_to_be_replaced = []
self._choose_stage_popup = ChooseStagePopup()
self._input_filenames_key, self._output_stage_id_key, self._is_valid_input_key = get_qt_data_keys(3)
self._required_extensions = [("obj",), ("mtl",), ("xml", "lz", "lz.raw")]
self._required_tools = ['GxModelViewer.exe', 'ws2lzfrontend.exe', 'SMB_LZ_Tool.exe']
self._tool_filepaths = self._find_required_tools()
self._imported_obj_filepaths = []
self._replace_queue = []
self._temp_dir = os.path.join(get_mbreplacer_dir(), 'temp')
def _find_required_tools(self):
tool_filepaths = {}
[tool_filepaths.update({f: os.path.join(dp, f)})
for dp, dn, filenames in os.walk(get_mbreplacer_dir())
for f in filenames if f in self._required_tools]
return tool_filepaths
def _add_single_stage(self, obj_filepath):
import_stage_directory = os.path.dirname(obj_filepath)
import_stage_base_name = str(os.path.basename(obj_filepath).split(".")[0])
all_filenames = os.listdir(import_stage_directory)
collected_filepaths = {}
item_string = import_stage_base_name + " | has: ["
for required_extension in self._required_extensions:
for extension in required_extension:
filename = import_stage_base_name + "." + extension
if filename in all_filenames:
collected_filepaths[os.path.splitext(filename)[1][1:]] = os.path.join(import_stage_directory, filename)
item_string += extension + ", "
break
item_string = item_string[:-2] + "]"
all_textures_present = False
if 'mtl' in collected_filepaths:
with open(collected_filepaths['mtl'], 'r') as f:
required_textures = []
for line in f:
split_line = line.strip().split()
if split_line and split_line[0] == 'map_Kd':
if os.path.isabs(split_line[1]):
required_textures.append(split_line[1])
else:
required_textures.append(os.path.join(import_stage_directory, split_line[1]))
all_textures_present = all([os.path.exists(texture) for texture in required_textures])
item_string += " | textures: " + ("yes" if all_textures_present else "no")
with open(obj_filepath, 'r') as f:
obj_lines = f.readlines()
num_vertices = len([line for line in obj_lines if line.startswith('v ')])
num_faces = len([line for line in obj_lines if line.startswith('f ')])
item_string += " | v:" + str(num_vertices) + " f: " + str(num_faces)
all_inputs_met = len(collected_filepaths.keys()) == len(self._required_extensions) and all_textures_present
item = QListWidgetItem()
item.setData(self._input_filenames_key, collected_filepaths)
item.setData(self._is_valid_input_key, all_inputs_met)
item.setText(item_string)
item.setIcon(QIcon("resources/green_checkmark.png") if all_inputs_met else QIcon("resources/red_xmark.png"))
self._imported_stages_list.addItem(item)
return Status.OK
def _add_single_stage_btn_clicked(self):
file_dialog = QFileDialog()
obj_filepath = QFileDialog.getOpenFileName(file_dialog,
"import stage .obj file",
get_mbreplacer_dir(),
"*.obj")[0]
if obj_filepath in self._imported_obj_filepaths:
duplicate_idx = self._imported_obj_filepaths.index(obj_filepath)
duplicate_item = self._imported_stages_list.item(duplicate_idx)
self._imported_stages_list.takeItem(self._imported_stages_list.row(duplicate_item))
del self._imported_obj_filepaths[duplicate_idx]
if obj_filepath:
self._add_single_stage(obj_filepath)
self._imported_obj_filepaths.append(obj_filepath)
self._imported_stages_list.sortItems()
return Status.OK
def _remove_single_stage_btn_clicked(self):
selected_items = self._imported_stages_list.selectedItems()
for selected_item in selected_items:
self._imported_stages_list.takeItem(self._imported_stages_list.row(selected_item))
return Status.OK
def _import_multiple_stages_btn_clicked(self):
file_dialog = QFileDialog()
file_dialog.setParent(self.sender())
stages_folder_path = QFileDialog.getExistingDirectory(file_dialog,
"import folder with multiple objs/mtls/configs",
get_mbreplacer_dir())
stages_folder_path = QtCore.QDir.toNativeSeparators(stages_folder_path)
obj_filepaths = [os.path.join(dp, f)
for dp, dn, filenames in os.walk(stages_folder_path)
for f in filenames if os.path.splitext(f)[1] == '.obj']
for obj_filepath in obj_filepaths:
if obj_filepath in self._imported_obj_filepaths:
duplicate_idx = self._imported_obj_filepaths.index(obj_filepath)
duplicate_item = self._imported_stages_list.item(duplicate_idx)
self._imported_stages_list.takeItem(self._imported_stages_list.row(duplicate_item))
del self._imported_obj_filepaths[duplicate_idx]
self._add_single_stage(obj_filepath)
self._imported_obj_filepaths.append(obj_filepath)
if obj_filepaths:
self._imported_stages_list.sortItems()
return Status.OK
def _import_root_btn_clicked(self):
file_dialog = QFileDialog()
file_dialog.setParent(self.sender())
self._root_folder_path = QFileDialog.getExistingDirectory(file_dialog,
"import root folder extracted from .iso",
get_mbreplacer_dir())
self._root_folder_path = QtCore.QDir.toNativeSeparators(self._root_folder_path)
if not os.path.exists(os.path.join(self._root_folder_path, 'stage')):
self._root_folder_path = None
self._give_error_message("root folder seems to be invalid, no 'stage' folder found")
return
self._root_folder_label.setText(self._root_folder_path)
def _add_to_replace_btn_clicked(self):
selected_items = self._imported_stages_list.selectedItems()
if not selected_items:
return Status.OK
elif not all([selected_item.data(self._is_valid_input_key) for selected_item in selected_items]):
required = [', or '.join(required_extension) for required_extension in self._required_extensions]
self._give_error_message("Could not find all required files for one of the selected stages!\n"
"Please sure the required files are in the same directory as the .obj,\n"
"then reimport the stage!\n\n"
"Required Extensions: " + str(required) + "\n\n"
"Also requires that all linked textures are found. "
"(open the mtl file as txt to see the texture paths)\n\n"
)
return Status.WARN
else:
self._choose_stage_popup.setWindowModality(QtCore.Qt.WindowModal)
self._choose_stage_popup.connect(self._on_choose_stage)
self._choose_stage_popup.show()
return Status.OK
    def _remove_from_replace_btn_clicked(self):
        selected_items = self._replace_queue_list.selectedItems()
        for selected_item in selected_items:
            row = self._replace_queue_list.row(selected_item)
            self._replace_queue_list.takeItem(row)
            self._choose_stage_popup.remove_associated_stage(self._replace_queue[row][1])
            del self._replace_queue[row]
        return Status.OK
def _replace_stage_in_root(self, obj_filepath, config_filepath, stage_id):
config_ext = os.path.splitext(config_filepath)[1]
base_filepath = os.path.splitext(obj_filepath)[0]
gma_filepath = base_filepath + ".gma"
tpl_filepath = base_filepath + ".tpl"
lz_raw_filepath = base_filepath + ".lz.raw"
lz_filepath = os.path.splitext(lz_raw_filepath)[0]
needs_lz_raw_creation = config_ext == ".xml"
needs_lz_compression = config_ext == ".xml" or config_ext == ".raw"
if not needs_lz_compression and not needs_lz_raw_creation and not os.path.exists(lz_filepath):
self._give_error_message(".lz file promised not found")
return Status.WARN
tool_id = 'GxModelViewer.exe'
if tool_id not in self._tool_filepaths:
self._give_error_message("Cannot find tool: " + tool_id +
"\n\nPlease make sure the tool with this exact name "
"is somewhere in the mbreplacer directory")
return Status.WARN
gx_process = subprocess.Popen([self._tool_filepaths['GxModelViewer.exe'], obj_filepath])
if needs_lz_raw_creation:
tool_id = 'ws2lzfrontend.exe'
if tool_id not in self._tool_filepaths:
self._give_error_message("Cannot find tool: " + tool_id +
"\n\nPlease make sure the tool with this exact name "
"is somewhere in the mbreplacer directory")
return Status.WARN
subprocess.call([self._tool_filepaths[tool_id], '-c', config_filepath, '-o', lz_raw_filepath, "-g", '2'])
if needs_lz_compression and not os.path.exists(lz_raw_filepath):
self._give_error_message("Failure to create .lz.raw file, ensure the config/obj/mtl files are valid, "
"as well as the ws2lzfrontend.exe tool")
return Status.WARN
if needs_lz_compression:
tool_id = 'SMB_LZ_Tool.exe'
if tool_id not in self._tool_filepaths:
self._give_error_message("Cannot find tool: " + tool_id +
"\n\nPlease make sure the tool with this exact name "
"is somewhere in the mbreplacer directory")
return
subprocess.call([self._tool_filepaths[tool_id], lz_raw_filepath])
if needs_lz_compression and not os.path.exists(lz_raw_filepath + '.lz'):
self._give_error_message("Failure to create .lz.raw file, ensure the config/obj/mtl files are valid, "
"as well as the ws2lzfrontend.exe tool")
return Status.WARN
if needs_lz_compression:
if os.path.exists(lz_filepath):
os.remove(lz_filepath)
os.rename(lz_raw_filepath + '.lz', lz_filepath)
os.remove(lz_raw_filepath)
gx_process.wait()
if not os.path.exists(gma_filepath) or not os.path.exists(tpl_filepath):
self._give_error_message("Failure to create gma and tpl files, ensure these files are correct, "
"as well as the GxModelViewer.exe (No GUI) tool")
return Status.WARN
stage_gma_filepath = os.path.join(self._root_folder_path, 'stage', 'st' + stage_id + '.gma')
stage_tpl_filepath = os.path.join(self._root_folder_path, 'stage', 'st' + stage_id + '.tpl')
stage_lz_filepath = os.path.join(self._root_folder_path, 'stage', 'STAGE' + stage_id + '.lz')
shutil.copy(gma_filepath, stage_gma_filepath)
shutil.copy(tpl_filepath, stage_tpl_filepath)
shutil.copy(lz_filepath, stage_lz_filepath)
return Status.OK
def _replace_btn_clicked(self):
if self._root_folder_path is None:
self._give_error_message("Please import your monkeyball root folder created by gamecube rebuilder")
return
self._tool_filepaths = self._find_required_tools()
for i in range(self._replace_queue_list.count()):
item = self._replace_queue_list.item(i)
input_filepaths = item.data(self._input_filenames_key)
obj_filepath = input_filepaths['obj']
config_filepath = [value for key, value in input_filepaths.items() if key != 'obj' and key != 'mtl'][0]
stage_id = item.data(self._output_stage_id_key)
status = self._replace_stage_in_root(obj_filepath, config_filepath, stage_id)
if status in (Status.WARN, Status.FAIL):
item.setIcon(QIcon("resources/red_xmark.png"))
return status
item.setIcon(QIcon("resources/green_checkmark.png"))
self._status_bar_label.setText("written " + os.path.basename(os.path.splitext(obj_filepath)[0]) + " to root")
return Status.OK
def _on_choose_stage(self):
if not self._choose_stage_popup.isActiveWindow():
return Status.OK
self._choose_stage_popup.close()
selected_items = self._imported_stages_list.selectedItems()
for selected_item in selected_items:
stage_index = self._choose_stage_popup.get_selected_stage_index()
replacement_stage_name = selected_item.text().split("|")[0][:-1]
if self._replace_queue:
stage_indices = list(zip(*self._replace_queue))[1]
if stage_index in stage_indices:
conflict_index = stage_indices.index(stage_index)
conflict_item = self._replace_queue_list.item(conflict_index)
self._replace_queue_list.takeItem(self._replace_queue_list.row(conflict_item))
del self._replace_queue[conflict_index]
if (replacement_stage_name, stage_index) in self._replace_queue:
return Status.OK
self._choose_stage_popup.set_associated_stage(stage_index, replacement_stage_name)
item = QListWidgetItem()
item.setData(self._output_stage_id_key, self._choose_stage_popup.get_stage_id(stage_index))
item.setData(self._input_filenames_key, selected_item.data(self._input_filenames_key))
item_text = replacement_stage_name + " -> " + self._choose_stage_popup.get_stage_name(stage_index)
item.setText(item_text)
item.setIcon(QIcon("resources/gray_dot.png"))
self._replace_queue_list.addItem(item)
self._replace_queue.append((replacement_stage_name, stage_index))
self._choose_stage_popup.increment_stage_index()
return Status.OK
def _give_error_message(self, message, raise_exception=False):
error_message = QMessageBox()
error_message.setParent(self.sender())
error_message.setWindowTitle("ERROR")
error_message.setText(message)
error_message.setWindowModality(QtCore.Qt.WindowModal)
error_message.exec_()
if raise_exception:
raise Exception(message)
def get_mbreplacer_dir():
return os.getcwd()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MBReplacer()
window.show()
sys.exit(app.exec_())
| true
| true
|
7906b6df6ae9fa0e10762b9a6e8c27d7ce6cddf4
| 25,614
|
py
|
Python
|
SloppyCell/ReactionNetworks/OptDesign.py
|
robertvsiii/sloppycell
|
caf6daa09f2202acccf26ad31890fddaf4af82e8
|
[
"BSD-3-Clause"
] | null | null | null |
SloppyCell/ReactionNetworks/OptDesign.py
|
robertvsiii/sloppycell
|
caf6daa09f2202acccf26ad31890fddaf4af82e8
|
[
"BSD-3-Clause"
] | 1
|
2019-04-15T21:08:12.000Z
|
2019-04-15T21:08:12.000Z
|
SloppyCell/ReactionNetworks/OptDesign.py
|
jurquiza/SloppyCellUrquiza2019
|
a9f64d9d4172c82735813f09e48f36777a714e9c
|
[
"BSD-3-Clause"
] | null | null | null |
import scipy, copy
import SloppyCell.Utility
load = SloppyCell.Utility.load
save = SloppyCell.Utility.save
import SloppyCell.ReactionNetworks.Dynamics as Dynamics
try:
import SloppyCell.Plotting as Plotting
except ImportError:
pass
def setup(paramfile,calcobject,senstrajfile,jtjfile) :
""" Set up the quantities necessary to run the optimal design
algorithms. NOTE: This function needs to be called first
before any of the optimal design functions can be called.
paramfile: the name of a pickled file containing the
best fit parameters in KeyedList format
calcobject: the calculation object for which we are doing the
    optimal design. (Note that in general we may be searching a
    design over many different calculations, but here we only
    consider one; thus we set design_senstraj equal to senstraj.)
senstrajfile: the name of the file containing the pickled
sensitivity trajectory for the calculation, calcobject,
for the set of parameters in paramfile.
jtjfile: the name of the file containing the pickled Fisher
Information Matrix (J^t J) for the current set of data and
for the parameters in paramfile.
NOTE: The derivatives computed for J^tJ need to be with respect
to the *log* of the parameters
"""
import OptDesign as v
v.curp = load(paramfile)
v.jtj = load(jtjfile)
v.clc = calcobject
v.senstraj = load(senstrajfile)
v.design_senstraj = v.senstraj
v.p_names_ordered = v.curp.keys()
v.jtjdict = {}
for pindex1,pname1 in enumerate(v.p_names_ordered) :
for pindex2,pname2 in enumerate(v.p_names_ordered) :
v.jtjdict[(pname1,pname2)] = v.jtj[pindex1][pindex2]
v.ovvarnames = v.clc.optimizableVars.keys()
v.jtjtrunc = scipy.zeros((len(v.ovvarnames),len(v.ovvarnames)),scipy.float_)
# The number of optimizable variables for the calculation we are
# considering might be less than the number of parameters for the
# whole model. We are only working with this calculation so we
# need to trim down the J^t J (Fisher information) matrix
# accordingly
for pindex1,pname1 in enumerate(v.ovvarnames) :
for pindex2,pname2 in enumerate(v.ovvarnames) :
v.jtjtrunc[pindex1][pindex2] = v.jtjdict[(pname1,pname2)]
def make_sens_traj(calcobject,params,times,senstrajfilename):
""" Make the sensitivity trajectory for the calculation
    calcobject (same as in setup(...) above).
params: parameters as a KeyedList, sensitivity traj is
calculated at these parameters (should be same as in paramfile
in setup(...) above)
times: the timepoints in the sensitivity trajectory (1-d array)
senstrajfilename: the file to save the sensitivity trajectory to
Note that if times is very finely spaced, the
sensitivity trajectory will need a lot of storage space """
senstraj = Dynamics.integrate_sensitivity(calcobject, times, params, 1.0e-6)
save(senstraj,senstrajfilename)
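# Illustrative usage sketch for the two entry points above.  The filenames,
# the network object `net` and the species names are hypothetical placeholders;
# the real objects come from the user's SloppyCell model.
#
#     times = scipy.linspace(0.0, 40.0, 200)
#     make_sens_traj(net, params, times, 'my_senstraj.pkl')
#     setup('best_fit_params.pkl', net, 'my_senstraj.pkl', 'jtj.pkl')
#     # after setup() the design routines below can be used, e.g.
#     # design_over_chems(['hidden_species'], ['measurable_species'],
#     #                   logprior=scipy.log(1000.0))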
def design_over_chems(chemnames,designchemnames,logprior=1.0e20) :
"""
chemnames = list of unmeasurable chemicals
designchemnames = list of measurable chemicals
logprior = prior on params, e.g. logprior = log(1000.0) means
parameter standard deviation will be less than a factor of 1000.0
    Out of the list designchemnames, find the best chemical and
    best time point that most reduces the integrated variance
    over chemnames """
times = design_senstraj.timepoints
trunc_times = [times[i] for i in scipy.arange(0,len(times),1)]
best_change = 0.0 # the change should always be negative
best_chem = "None"
best_time = "None"
for dchemname in designchemnames :
print "On design chemical ", dchemname
for t in trunc_times :
sensvect_design = get_sens_vect(dchemname,t)
# NOTE: assuming a 10% error on the measurement --- use 10% of the
# maximum value in the trajectory
maxval = max(design_senstraj.get_var_traj(dchemname)) + 1.0
sensvect_design = sensvect_design/(.1*maxval)
intvar_change = integrated_var_change(chemnames,sensvect_design,logprior)
tot_change = 0.0
for id in chemnames :
tot_change = tot_change + intvar_change[id]
if tot_change < best_change :
best_change = tot_change
best_chem = dchemname
best_time = t
return best_change, best_chem, best_time
def design_over_single_variance(sensvect,designchemnames,logprior=1.0e20) :
"""
sensvect : a sensitivity vector (length = # of params) of
unmeasurable quantity of interest
designchemnames : list of measurable chemicals
sensvect could be the sensitivity of a single chemical at a
single timepoint; then can use method get_sens_vect (see elsewhere
in this file) to compute this sensitivity vector. In that
case we are designing over the species variance at that single point
"""
times = senstraj.timepoints
trunc_times = [times[i] for i in scipy.arange(0,len(times),5)]
best_change = 0.0 # the change should always be negative
best_chem = "None"
best_time = "None"
for dchemname in designchemnames :
for t in trunc_times :
sensvect_design = get_sens_vect(dchemname,t)
var_change = single_variance_change(sensvect,sensvect_design,logprior)
if var_change < best_change :
best_change = var_change
best_chem = dchemname
best_time = t
return best_change, best_chem, best_time
def variances(chemnames,logprior=1.0e20) :
""" chemnames : list of chemical names for which the
variance at all timepoints will be computed
logprior : prior on parameters. logprior = log(1000.0)
means params allowed to vary by about a factor of 1000.0
return values :
times: times of the trajectory
bestfit: a dictionary of best fit trajectories (keys are entries in chemnames)
var: a dictionary of variances (keys are entries in chemnames)
"""
#senstraj = load('EndogenousEGFR3T3sensNoPriors')
times = senstraj.timepoints
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(
len(jtjtrunc),len(jtjtrunc)))
var = {}
bestfit = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
var[name] = []
bestfit[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
# Turn sensitivities into sensitivities with respect to log parameters
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
tmp = scipy.dot(sensarray_this_chem,jtjinv)
for i in range(len(tmp[:,0])) :
var[name].append(scipy.dot(tmp[i,:],sensarray_this_chem[i,:]))
bestfit[name] = senstraj.values[:,chemindex]
var[name] = scipy.asarray(var[name])
return times, bestfit, var
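# Illustrative, self-contained sketch of the linear algebra used by variances():
# for each time point the variance is s^T (JtJ + I/logprior^2)^-1 s, with s the
# row of log-parameter sensitivities.  Plain numpy, toy sizes, no SloppyCell
# objects involved.
def _demo_variance_from_sensitivities():
    import numpy as np
    np.random.seed(0)
    n_params, n_times = 3, 5
    J = np.random.randn(2 * n_params, n_params)
    jtj_toy = np.dot(J.T, J)                        # symmetric positive definite FIM
    sens = np.random.randn(n_times, n_params)       # log-parameter sensitivities per time
    logprior = np.log(1000.0)
    cov = np.linalg.inv(jtj_toy + np.eye(n_params) / logprior**2)
    var = np.array([np.dot(s, np.dot(cov, s)) for s in sens])  # s^T C s per time point
    assert np.all(var >= 0.0)
    return var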
def variances_log_chems(chemnames,logprior=1.0e20) :
""" Same as above except the variances are now on the
logs of the chemicals trajectories.
"""
#senstraj = load('EndogenousEGFR3T3sensNoPriors')
times = senstraj.timepoints
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(
len(jtjtrunc),len(jtjtrunc)))
var = {}
bestfit = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
var[name] = []
bestfit[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
traj_this_chem = copy.copy(senstraj.values[:,chemindex])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
# need to scale each row by 1/chemvalue to mimic a derivative w.r.t.
# log chemicals. Add a small value to chemvalue to avoid divide by zero
for i in range(len(times)) :
sensarray_this_chem[i,:] = sensarray_this_chem[i,:]/(traj_this_chem[i]+1.0e-6)
tmp = scipy.dot(sensarray_this_chem,jtjinv)
for i in range(len(tmp[:,0])) :
var[name].append(scipy.dot(tmp[i,:],sensarray_this_chem[i,:]))
bestfit[name] = senstraj.values[:,chemindex]
var[name] = scipy.asarray(var[name])
return times,bestfit,var
def single_variance(sensvect,logprior=1.0e20) :
""" Get the variance for a single function of parameters
that has a sensitivity vector sensvect. Useful for looking at
variances in parameter combinations, or simple functions of
parameters. Note that if we are concerned with ratios and
products of parameters, it's often best to consider sensvect
as a sensitivity w.r.t. log parameters """
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(
len(jtjtrunc),len(jtjtrunc)))
tmp = scipy.dot(jtjinv,sensvect)
var = scipy.dot(sensvect,tmp)
return var
def variance_change(chemnames,sensvect_design,logprior=1.0e20) :
"""
chemnames : list of chemical names at which we will look
at variance
sensvect_design : the sensitivity vector (one by no. params array) at
the new design point.
returns : (times, varchange)
the times and the change in variances at those times (should
be negative) for each of the chemicals in chemnames, after the
addition of the new timepoint. varchange is a dictionary
indexed by entries in chemnames.
"""
times = senstraj.timepoints
n = len(jtjtrunc)
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(n,n))
#sensvect_design = scipy.resize(sensvect_design,(n,1))
jtjinv_design = scipy.dot(jtjinv,sensvect_design)
#jtjinv_design = scipy.resize(jtjinv_design,(n,1)) # want a column vector
denominator = 1.0 + scipy.dot(sensvect_design,jtjinv_design)
varchange = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
varchange[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
product = scipy.dot(sensarray_this_chem,jtjinv_design)
# this product is a number of timepoints by one vector, we need to
# square each element for the final formula
varchange[name] = -scipy.asarray(product**2/denominator)
return times, varchange
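# Standalone numerical check of the rank-one (Sherman-Morrison) update that
# variance_change()/single_variance_change() rely on: adding one measurement
# with sensitivity d changes the variance of a quantity with sensitivity s by
# -(s^T C d)^2 / (1 + d^T C d), where C is the current inverse FIM.  Toy data,
# plain numpy.
def _demo_rank_one_variance_update():
    import numpy as np
    np.random.seed(1)
    n = 4
    J = np.random.randn(3 * n, n)
    A = np.dot(J.T, J)                  # current FIM
    C = np.linalg.inv(A)
    s = np.random.randn(n)              # sensitivity of the quantity of interest
    d = np.random.randn(n)              # sensitivity of the proposed measurement
    Cd = np.dot(C, d)
    predicted = -np.dot(s, Cd)**2 / (1.0 + np.dot(d, Cd))
    C_new = np.linalg.inv(A + np.outer(d, d))
    actual = np.dot(s, np.dot(C_new, s)) - np.dot(s, np.dot(C, s))
    assert abs(predicted - actual) < 1e-8
    return predicted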
def single_variance_change(sensvect,sensvect_design,logprior=1.0e20) :
"""
sensvect : given a single function f(p) of parameters, this is the
derivative w.r.t. each of the parameters (in log parameters). For
ratios or products of rate constants, f(p) is a linear function
sensvect_design : the sensitivity vector of the new point in the
design you wish to add
returns: the variance change of the quantity f(p), given the
addition of the new data point, with sensitivity vector sensvect_design.
"""
n = len(jtjtrunc)
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(n,n))
jtjinv_design = scipy.dot(jtjinv,sensvect_design)
denominator = 1.0 + scipy.dot(sensvect_design,jtjinv_design)
product = scipy.dot(sensvect,jtjinv_design)
return -product**2/denominator
def get_sens_vect(chemname,time) :
""" get a sensitivity vector for a chemical "chemname" at a
time, time """
tindex = design_senstraj._get_time_index(time,1.0e-4)
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
index1sens = design_senstraj.key_column.get((chemname,first))
index2sens = design_senstraj.key_column.get((chemname,last))
sens_vect = copy.copy(
design_senstraj.values[tindex,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sens_vect[j] = sens_vect[j]*curp.get(pname)
return sens_vect
def get_sens_array(chemname) :
""" get an array of sens_vects for all the times the chemical is defined
and convert to log sensitivities """
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
chemindex = design_senstraj.key_column.get(chemname)
index1sens = design_senstraj.key_column.get((chemname,first))
index2sens = design_senstraj.key_column.get((chemname,last))
sensarray_this_chem = copy.copy(
design_senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
return sensarray_this_chem
def integrated_var_change(chemnames,sensvect_design,logprior=1.0e20) :
times, varchange = variance_change(chemnames,sensvect_design,logprior)
int_varchange = {}
for name in varchange.keys() :
int_varchange[name] = scipy.integrate.simps(varchange[name],times)
return int_varchange
def var_change_weighted(weights,chemnames,sensarray_design,logprior=1.0e20) :
""" This is similar to var_change except now we pass in a sensarray
instead of sensvect --- this is a matrix of sensvects aligned rowwise.
Row i will be multiplied by sqrt(weights[i]) where sum(weights)=1 and
each weight is a number between zero and one. We will return the
change in variance for all the chemicals in chemnames """
# we use the formula (Sherman-Woodbury-Morrison)
# (A+UV^t)^(-1) = A^(-1) - A^(-1)*U*(I + V^T*A^(-1)*U)^(-1)*V^t*A^(-1)
# where U = V and V^t = W^(1/2)*sensarray_design
times = senstraj.timepoints
ntimes = len(times)
k,n = sensarray_design.shape
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(n,n))
Vt = scipy.zeros((k,n),scipy.float_)
for i in range(k) :
Vt[i,:] = scipy.sqrt(weights[i])*sensarray_design[i,:]
design_jtjinv = scipy.dot(Vt,jtjinv)
#jtjinv_design = scipy.resize(jtjinv_design,(n,1)) # want a column vector
denominator = scipy.eye(k,k) + \
scipy.dot(design_jtjinv,scipy.transpose(Vt))
inv_denom = scipy.linalg.inv(denominator)
varchange = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
varchange[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
product = scipy.dot(design_jtjinv,
scipy.transpose(sensarray_this_chem))
# each column vector of this matrix has to be dotted through the
# denominator matrix --- each column is a different time point
for j in range(ntimes) :
quadprod = scipy.dot(product[:,j],inv_denom)
quadprod = scipy.dot(quadprod,product[:,j])
varchange[name].append(-quadprod)
varchange[name] = scipy.asarray(varchange[name])
return times, varchange
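# The same check for the block (Sherman-Woodbury-Morrison) update used by
# var_change_weighted(): with Vt = W^(1/2) * S_design, the variance of a
# quantity with sensitivity s changes by -(Vt C s)^T (I + Vt C Vt^T)^-1 (Vt C s).
# Toy data, plain numpy, independent of the module globals.
def _demo_woodbury_block_update():
    import numpy as np
    np.random.seed(2)
    n, k = 5, 3
    J = np.random.randn(4 * n, n)
    A = np.dot(J.T, J)                          # current FIM
    C = np.linalg.inv(A)
    S = np.random.randn(k, n)                   # k candidate measurement sensitivities
    w = np.array([0.5, 0.3, 0.2])               # design weights, sum to 1
    Vt = np.sqrt(w)[:, None] * S                # rows scaled by sqrt(weight)
    s = np.random.randn(n)                      # target quantity sensitivity
    VCs = np.dot(Vt, np.dot(C, s))
    M = np.eye(k) + np.dot(Vt, np.dot(C, Vt.T))
    predicted = -np.dot(VCs, np.linalg.solve(M, VCs))
    C_new = np.linalg.inv(A + np.dot(Vt.T, Vt))
    actual = np.dot(s, np.dot(C_new, s)) - np.dot(s, np.dot(C, s))
    assert abs(predicted - actual) < 1e-8
    return predicted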
def integrated_var_change_weighted(weights,chemnames,sensarray_design,logprior=1.0e20) :
times, varchange = var_change_weighted(weights,chemnames,sensarray_design,
logprior)
intvarchange = {}
for name in varchange.keys() :
intvarchange[name] = scipy.integrate.simps(varchange[name],times)
return intvarchange
def weight_cost(weights,chemnames,sensarray_design,logprior=1.0e20) :
""" For this cost function we're going to assume unconstrained
variables are being passed in, so we need to convert them to
a range between 0 and 1. The sum of the weights should also = 1 """
weights0to1 = weights_trans(weights)
# now weights lie between 0 and 1
weights0to1 = weights0to1/scipy.sum(weights0to1) # this makes sure
# weights sum up to 1.
intvarchange = integrated_var_change_weighted(weights0to1,chemnames,
sensarray_design,logprior)
cost = 0.0
for n in intvarchange.keys() :
cost = cost + intvarchange[n]
return cost
def weights_trans(weights) :
wtrans = (scipy.sin(weights)+1.0)/2.0
return wtrans
def weights_inv_trans(transweights) :
w = scipy.arcsin(2.0*transweights-1.0)
return w
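# Editor's note: a small illustrative check, not part of the original module,
# of the sine re-parametrization used by weight_cost: weights_trans maps an
# unconstrained real into [0, 1] and weights_inv_trans undoes it for values
# already in [0, 1].  _weights_trans_demo is a hypothetical helper name.
def _weights_trans_demo():
    t = scipy.array([0.0, 0.25, 0.5, 1.0])
    roundtrip = weights_trans(weights_inv_trans(t))
    return scipy.allclose(roundtrip, t)       # should be True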
def minimize_weight_cost(weights,chemnames,sensarray_design,logprior=1.0e20) :
"""
weights : a vector of positive numbers with length the same as the number of
rows of sensarray_design. The weights should sum to 1
chemnames: a list of unmeasurable chemical names over which we wish
to design experiments
sensarray_design: an array of sensitivities of measurable chemicals
or just an array of sensitivity vectors, each row a different
sensitivity vector
logprior : prior on parameters. logprior = log(1000.0) allows parameters
to fluctuate by a factor of 1000 """
weights_trans = scipy.arcsin(2.0*weights-1.0)
# maxiter may need to be increased if convergence is not apparent
# or if the number of weights is increased
w = scipy.optimize.fmin(weight_cost,weights_trans,maxiter = 10000,
args=(chemnames,sensarray_design,logprior))
woptnotnormed = (scipy.sin(w)+1.0)/2.0
wopt = woptnotnormed/scipy.sum(woptnotnormed)
return woptnotnormed,wopt
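# Editor's note: illustrative usage only, not part of the original module.
# The chemical names below are hypothetical placeholders; the sensitivity
# array comes from get_sens_array defined elsewhere in this file.
#
#   meas_sens = get_sens_array('measurable_chem')
#   nrows = meas_sens.shape[0]
#   start_weights = scipy.ones(nrows)/nrows
#   wnotnormed, wopt = minimize_weight_cost(start_weights, ['hidden_chem'],
#                                           meas_sens,
#                                           logprior=scipy.log(1000.0))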
def plot_variances(chemnames,logprior,scale=1.0,return_var = False) :
"""
chemnames: list of chemical names
logprior: prior on params. logprior = log(1000.0) means parameters
allowed to fluctuate by a factor of 1000 """
times, bestfit, var = variances(chemnames,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,bestfit[key]/scale)
Plotting.hold(True)
Plotting.plot(times,bestfit[key]/scale + scipy.sqrt(var[key])/scale,'r--')
Plotting.plot(times,bestfit[key]/scale - scipy.sqrt(var[key])/scale,'r--')
Plotting.title(key,fontsize=16)
Plotting.xlabel('time (minutes)',fontsize=16)
Plotting.ylabel('number of molecules',fontsize=16)
xtics = Plotting.gca().get_xticklabels()
ytics = Plotting.gca().get_yticklabels()
Plotting.setp(xtics,size=16)
Plotting.setp(ytics,size=16)
#Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
if return_var :
return times, bestfit, var
def plot_variances_log_chems(chemnames,logprior) :
"""
chemnames: list of chemical names
logprior: prior on params
Plots the standard deviation of the chemicals when the variance
is computed using logs of the chemical trajectories. This
makes sure the final plots do not have best_fit+-stddev that
do not become negative """
times, bestfit, var = variances_log_chems(chemnames,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,bestfit[key])
Plotting.hold(True)
Plotting.plot(times,bestfit[key]*scipy.exp(scipy.sqrt(var[key])),'r-')
Plotting.plot(times,bestfit[key]*scipy.exp(-scipy.sqrt(var[key])),'r-')
Plotting.title(key,fontsize=14)
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
#Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
def plot_variance_newpoint(chemnames,sensvect_design,logprior=1.0e20,
return_data = True) :
"""
chemnames: list of chemical names
    sensvect_design: a sensitivity vector of a quantity that is
measurable
This will plot the old and new variances of the chemicals in
chemnames, given a new measurement that has sensitivity vector
sensvect_design
"""
times,bestfit,var = variances(chemnames,logprior)
times,varchange = variance_change(chemnames,sensvect_design,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,bestfit[key])
Plotting.hold(True)
Plotting.plot(times,bestfit[key] + scipy.sqrt(var[key]),'r-')
Plotting.plot(times,bestfit[key] - scipy.sqrt(var[key]),'r-')
Plotting.plot(times,bestfit[key] + scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.plot(times,bestfit[key] - scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.title(key,fontsize=14)
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
if return_data :
newvar = {}
for ky in var.keys() :
            newvar[ky] = var[ky] + varchange[ky]
return times,bestfit,newvar
def plot_variance_newweights(weights,chemnames,sensarray_design,logprior=1.0e20,scale=1.0,return_data = True) :
"""
weights : a proposed set of weights for each of the row vectors in
sensarray_design
chemnames : a list of chemicals for which we will plot the variance
logprior : as before
This will plot the old and new variances on chemnames, similar to
above.
NOTE: the weights that are passed in do not necessarily have to sum to
one. e.g. if the weights are normalized such that max(weights) = 1, then
by scaling all the weights by 1/sigma, you are then assuming that
the most accurate measurement has an error of size sigma. sigma for
example could be 20% of the maximum value of a trajectory.
"""
times,bestfit,var = variances(chemnames,logprior)
times,varchange = var_change_weighted(weights,chemnames,sensarray_design,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,scale*bestfit[key])
Plotting.hold(True)
Plotting.plot(times,scale*bestfit[key] + scale*scipy.sqrt(var[key]),'r-')
Plotting.plot(times,scale*bestfit[key] - scale*scipy.sqrt(var[key]),'r-')
Plotting.plot(times,scale*bestfit[key] + scale*scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.plot(times,scale*bestfit[key] - scale*scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.title(key,fontsize=14)
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
if return_data :
newvar = {}
for ky in var.keys() :
            newvar[ky] = var[ky] + varchange[ky]
return times,bestfit,newvar
def plot_variances_subplot(chemnames,logprior) :
times, bestfit, var = variances(chemnames,logprior)
nallplots = len(chemnames)
# 9 at a time
nfigs = nallplots/9 # integer division -- no fractional part
for figno in range(1,nfigs+1) :
Plotting.figure()
for i in range(0,9) :
Plotting.subplot(3,3,i+1)
chemind = i+(figno-1)*9
Plotting.plot(times,bestfit[chemnames[chemind]])
Plotting.hold(True)
Plotting.plot(times,bestfit[chemnames[chemind]]
+ scipy.sqrt(var[chemnames[chemind]]),'r-')
Plotting.plot(times,bestfit[chemnames[chemind]]
- scipy.sqrt(var[chemnames[chemind]]),'r-')
yt = Plotting.yticks()
Plotting.axis([0,100.0,yt[0],yt[-1]])
Plotting.title(chemnames[chemind])
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
xt = Plotting.xticks()
Plotting.xticks([xt[0],xt[-1]])
        Plotting.savefig('./figs/variance_wt_'+figno.__str__()+'.ps')
Plotting.show()
#def fix_sf():
# make sure scale factors get computed --- easiest way is
# to compute the cost
# print "cost is ", m.cost(curp)
# sfs = m.internalVars['scaleFactors']
# for exptname in sfs.keys() :
# fixeddict = sfs[exptname]
# m.exptColl[exptname].set_fixed_sf(fixeddict)
# just check
# print "cost is now", m.cost(curp)
def reduce_size(array,skipsize) :
""" reduce_size takes an array of dimension m,n and
    returns an array containing every skipsize-th row.
"""
size = array.shape
newsize = len(scipy.arange(0,size[0],skipsize))
if len(size) == 1 : # a vector
newvect = scipy.zeros((newsize,),scipy.float_)
for iind,i in enumerate(scipy.arange(0,size[0],skipsize)) :
newvect[iind] = array[i]
return newvect
elif len(size) == 2 : # an array
newarray = scipy.zeros((newsize,size[1]),scipy.float_)
for iind,i in enumerate(scipy.arange(0,size[0],skipsize)) :
newarray[iind] = array[i]
return newarray
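# Editor's note: small illustrative check of reduce_size, not part of the
# original module (_reduce_size_demo is a hypothetical helper name): sampling
# every 2nd row of a 6x3 array keeps rows 0, 2 and 4.
def _reduce_size_demo():
    a = scipy.arange(18.).reshape(6, 3)
    return reduce_size(a, 2).shape == (3, 3)  # should be True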
| 43.487267
| 111
| 0.666979
|
import scipy, copy
import SloppyCell.Utility
load = SloppyCell.Utility.load
save = SloppyCell.Utility.save
import SloppyCell.ReactionNetworks.Dynamics as Dynamics
try:
import SloppyCell.Plotting as Plotting
except ImportError:
pass
def setup(paramfile,calcobject,senstrajfile,jtjfile) :
""" Set up the quantities necessary to run the optimal design
algorithms. NOTE: This function needs to be called first
before any of the optimal design functions can be called.
paramfile: the name of a pickled file containing the
best fit parameters in KeyedList format
calcobject: the calculation object for which we are doing the
        optimal design. (Note that in general one may be searching a
        design over many different calculations, but here we only
        consider one. Thus, we set design_senstraj equal to senstraj.)
senstrajfile: the name of the file containing the pickled
sensitivity trajectory for the calculation, calcobject,
for the set of parameters in paramfile.
jtjfile: the name of the file containing the pickled Fisher
Information Matrix (J^t J) for the current set of data and
for the parameters in paramfile.
NOTE: The derivatives computed for J^tJ need to be with respect
to the *log* of the parameters
"""
import OptDesign as v
v.curp = load(paramfile)
v.jtj = load(jtjfile)
v.clc = calcobject
v.senstraj = load(senstrajfile)
v.design_senstraj = v.senstraj
v.p_names_ordered = v.curp.keys()
v.jtjdict = {}
for pindex1,pname1 in enumerate(v.p_names_ordered) :
for pindex2,pname2 in enumerate(v.p_names_ordered) :
v.jtjdict[(pname1,pname2)] = v.jtj[pindex1][pindex2]
v.ovvarnames = v.clc.optimizableVars.keys()
v.jtjtrunc = scipy.zeros((len(v.ovvarnames),len(v.ovvarnames)),scipy.float_)
for pindex1,pname1 in enumerate(v.ovvarnames) :
for pindex2,pname2 in enumerate(v.ovvarnames) :
v.jtjtrunc[pindex1][pindex2] = v.jtjdict[(pname1,pname2)]
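# Editor's note: illustrative call sequence only, not part of the original
# module.  The file names and the calculation object "calc" are hypothetical:
#
#   import OptDesign
#   OptDesign.make_sens_traj(calc, bestfit_params,
#                            scipy.linspace(0.0, 100.0, 200),
#                            'senstraj.calc.bp')
#   OptDesign.setup('bestfit_params.bp', calc, 'senstraj.calc.bp', 'jtj.bp')
#   OptDesign.design_over_chems(['hidden_chem'], ['measurable_chem'],
#                               logprior=scipy.log(1000.0))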
def make_sens_traj(calcobject,params,times,senstrajfilename):
""" Make the sensitivity trajectory for the calculation
    calcobject (same as in setup(...) above).
params: parameters as a KeyedList, sensitivity traj is
calculated at these parameters (should be same as in paramfile
in setup(...) above)
times: the timepoints in the sensitivity trajectory (1-d array)
senstrajfilename: the file to save the sensitivity trajectory to
Note that if times is very finely spaced, the
sensitivity trajectory will need a lot of storage space """
senstraj = Dynamics.integrate_sensitivity(calcobject, times, params, 1.0e-6)
save(senstraj,senstrajfilename)
def design_over_chems(chemnames,designchemnames,logprior=1.0e20) :
"""
chemnames = list of unmeasurable chemicals
designchemnames = list of measurable chemicals
logprior = prior on params, e.g. logprior = log(1000.0) means
parameter standard deviation will be less than a factor of 1000.0
Out of the list chemnames, find the best chemical and
best time point, that most reduces the integrated variance
over designchemnames """
times = design_senstraj.timepoints
trunc_times = [times[i] for i in scipy.arange(0,len(times),1)]
best_change = 0.0
best_chem = "None"
best_time = "None"
for dchemname in designchemnames :
print "On design chemical ", dchemname
for t in trunc_times :
sensvect_design = get_sens_vect(dchemname,t)
maxval = max(design_senstraj.get_var_traj(dchemname)) + 1.0
sensvect_design = sensvect_design/(.1*maxval)
intvar_change = integrated_var_change(chemnames,sensvect_design,logprior)
tot_change = 0.0
for id in chemnames :
tot_change = tot_change + intvar_change[id]
if tot_change < best_change :
best_change = tot_change
best_chem = dchemname
best_time = t
return best_change, best_chem, best_time
def design_over_single_variance(sensvect,designchemnames,logprior=1.0e20) :
"""
sensvect : a sensitivity vector (length = # of params) of
unmeasurable quantity of interest
designchemnames : list of measurable chemicals
sensvect could be the sensitivity of a single chemical at a
single timepoint; then can use method get_sens_vect (see elsewhere
in this file) to compute this sensitivity vector. In that
case we are designing over the species variance at that single point
"""
times = senstraj.timepoints
trunc_times = [times[i] for i in scipy.arange(0,len(times),5)]
best_change = 0.0
best_chem = "None"
best_time = "None"
for dchemname in designchemnames :
for t in trunc_times :
sensvect_design = get_sens_vect(dchemname,t)
var_change = single_variance_change(sensvect,sensvect_design,logprior)
if var_change < best_change :
best_change = var_change
best_chem = dchemname
best_time = t
return best_change, best_chem, best_time
def variances(chemnames,logprior=1.0e20) :
""" chemnames : list of chemical names for which the
variance at all timepoints will be computed
logprior : prior on parameters. logprior = log(1000.0)
means params allowed to vary by about a factor of 1000.0
return values :
times: times of the trajectory
bestfit: a dictionary of best fit trajectories (keys are entries in chemnames)
var: a dictionary of variances (keys are entries in chemnames)
"""
times = senstraj.timepoints
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(
len(jtjtrunc),len(jtjtrunc)))
var = {}
bestfit = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
var[name] = []
bestfit[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
tmp = scipy.dot(sensarray_this_chem,jtjinv)
for i in range(len(tmp[:,0])) :
var[name].append(scipy.dot(tmp[i,:],sensarray_this_chem[i,:]))
bestfit[name] = senstraj.values[:,chemindex]
var[name] = scipy.asarray(var[name])
return times, bestfit, var
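# Editor's note: illustrative restatement of the quadratic form computed above
# for a single time point, not part of the original module.  The variance is
# s^T (JtJ + I/logprior^2)^{-1} s, with s the log-parameter sensitivity row;
# _single_time_variance is a hypothetical helper name.
def _single_time_variance(sens_row, jtj, logprior):
    import numpy
    n = len(jtj)
    cov = numpy.linalg.inv(jtj + numpy.eye(n)/logprior**2)
    return numpy.dot(sens_row, numpy.dot(cov, sens_row))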
def variances_log_chems(chemnames,logprior=1.0e20) :
""" Same as above except the variances are now on the
    logs of the chemical trajectories.
"""
times = senstraj.timepoints
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(
len(jtjtrunc),len(jtjtrunc)))
var = {}
bestfit = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
var[name] = []
bestfit[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
traj_this_chem = copy.copy(senstraj.values[:,chemindex])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
for i in range(len(times)) :
sensarray_this_chem[i,:] = sensarray_this_chem[i,:]/(traj_this_chem[i]+1.0e-6)
tmp = scipy.dot(sensarray_this_chem,jtjinv)
for i in range(len(tmp[:,0])) :
var[name].append(scipy.dot(tmp[i,:],sensarray_this_chem[i,:]))
bestfit[name] = senstraj.values[:,chemindex]
var[name] = scipy.asarray(var[name])
return times,bestfit,var
def single_variance(sensvect,logprior=1.0e20) :
""" Get the variance for a single function of parameters
that has a sensitivity vector sensvect. Useful for looking at
variances in parameter combinations, or simple functions of
parameters. Note that if we are concerned with ratios and
products of parameters, it's often best to consider sensvect
as a sensitivity w.r.t. log parameters """
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(
len(jtjtrunc),len(jtjtrunc)))
tmp = scipy.dot(jtjinv,sensvect)
var = scipy.dot(sensvect,tmp)
return var
def variance_change(chemnames,sensvect_design,logprior=1.0e20) :
"""
    chemnames : list of chemical names whose variance we will
    examine
sensvect_design : the sensitivity vector (one by no. params array) at
the new design point.
returns : (times, varchange)
the times and the change in variances at those times (should
be negative) for each of the chemicals in chemnames, after the
addition of the new timepoint. varchange is a dictionary
indexed by entries in chemnames.
"""
times = senstraj.timepoints
n = len(jtjtrunc)
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(n,n))
#sensvect_design = scipy.resize(sensvect_design,(n,1))
jtjinv_design = scipy.dot(jtjinv,sensvect_design)
#jtjinv_design = scipy.resize(jtjinv_design,(n,1)) # want a column vector
denominator = 1.0 + scipy.dot(sensvect_design,jtjinv_design)
varchange = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
varchange[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
product = scipy.dot(sensarray_this_chem,jtjinv_design)
# this product is a number of timepoints by one vector, we need to
# square each element for the final formula
varchange[name] = -scipy.asarray(product**2/denominator)
return times, varchange
def single_variance_change(sensvect,sensvect_design,logprior=1.0e20) :
"""
sensvect : given a single function f(p) of parameters, this is the
derivative w.r.t. each of the parameters (in log parameters). For
ratios or products of rate constants, f(p) is a linear function
sensvect_design : the sensitivity vector of the new point in the
design you wish to add
returns: the variance change of the quantity f(p), given the
addition of the new data point, with sensitivity vector sensvect_design.
"""
n = len(jtjtrunc)
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(n,n))
jtjinv_design = scipy.dot(jtjinv,sensvect_design)
denominator = 1.0 + scipy.dot(sensvect_design,jtjinv_design)
product = scipy.dot(sensvect,jtjinv_design)
return -product**2/denominator
def get_sens_vect(chemname,time) :
""" get a sensitivity vector for a chemical "chemname" at a
time, time """
tindex = design_senstraj._get_time_index(time,1.0e-4)
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
index1sens = design_senstraj.key_column.get((chemname,first))
index2sens = design_senstraj.key_column.get((chemname,last))
sens_vect = copy.copy(
design_senstraj.values[tindex,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sens_vect[j] = sens_vect[j]*curp.get(pname)
return sens_vect
def get_sens_array(chemname) :
""" get an array of sens_vects for all the times the chemical is defined
and convert to log sensitivities """
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
chemindex = design_senstraj.key_column.get(chemname)
index1sens = design_senstraj.key_column.get((chemname,first))
index2sens = design_senstraj.key_column.get((chemname,last))
sensarray_this_chem = copy.copy(
design_senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
return sensarray_this_chem
def integrated_var_change(chemnames,sensvect_design,logprior=1.0e20) :
times, varchange = variance_change(chemnames,sensvect_design,logprior)
int_varchange = {}
for name in varchange.keys() :
int_varchange[name] = scipy.integrate.simps(varchange[name],times)
return int_varchange
def var_change_weighted(weights,chemnames,sensarray_design,logprior=1.0e20) :
""" This is similar to var_change except now we pass in a sensarray
instead of sensvect --- this is a matrix of sensvects aligned rowwise.
Row i will be multiplied by sqrt(weights[i]) where sum(weights)=1 and
each weight is a number between zero and one. We will return the
change in variance for all the chemicals in chemnames """
# we use the formula (Sherman-Woodbury-Morrison)
# (A+UV^t)^(-1) = A^(-1) - A^(-1)*U*(I + V^T*A^(-1)*U)^(-1)*V^t*A^(-1)
# where U = V and V^t = W^(1/2)*sensarray_design
times = senstraj.timepoints
ntimes = len(times)
k,n = sensarray_design.shape
jtjinv = scipy.linalg.inv(jtjtrunc+1.0/logprior**2*scipy.eye(n,n))
Vt = scipy.zeros((k,n),scipy.float_)
for i in range(k) :
Vt[i,:] = scipy.sqrt(weights[i])*sensarray_design[i,:]
design_jtjinv = scipy.dot(Vt,jtjinv)
#jtjinv_design = scipy.resize(jtjinv_design,(n,1)) # want a column vector
denominator = scipy.eye(k,k) + \
scipy.dot(design_jtjinv,scipy.transpose(Vt))
inv_denom = scipy.linalg.inv(denominator)
varchange = {}
optvarkeys = clc.optimizableVars.keys()
first = optvarkeys[0]
last = optvarkeys[-1]
for name in chemnames :
varchange[name] = []
chemindex = senstraj.key_column.get(name)
index1sens = senstraj.key_column.get((name,first))
index2sens = senstraj.key_column.get((name,last))
sensarray_this_chem = copy.copy(senstraj.values[:,index1sens:(index2sens+1)])
for j, pname in enumerate(ovvarnames) :
sensarray_this_chem[:,j] = sensarray_this_chem[:,j]*curp.get(pname)
product = scipy.dot(design_jtjinv,
scipy.transpose(sensarray_this_chem))
# each column vector of this matrix has to be dotted through the
# denominator matrix --- each column is a different time point
for j in range(ntimes) :
quadprod = scipy.dot(product[:,j],inv_denom)
quadprod = scipy.dot(quadprod,product[:,j])
varchange[name].append(-quadprod)
varchange[name] = scipy.asarray(varchange[name])
return times, varchange
def integrated_var_change_weighted(weights,chemnames,sensarray_design,logprior=1.0e20) :
times, varchange = var_change_weighted(weights,chemnames,sensarray_design,
logprior)
intvarchange = {}
for name in varchange.keys() :
intvarchange[name] = scipy.integrate.simps(varchange[name],times)
return intvarchange
def weight_cost(weights,chemnames,sensarray_design,logprior=1.0e20) :
""" For this cost function we're going to assume unconstrained
variables are being passed in, so we need to convert them to
a range between 0 and 1. The sum of the weights should also = 1 """
weights0to1 = weights_trans(weights)
weights0to1 = weights0to1/scipy.sum(weights0to1)
intvarchange = integrated_var_change_weighted(weights0to1,chemnames,
sensarray_design,logprior)
cost = 0.0
for n in intvarchange.keys() :
cost = cost + intvarchange[n]
return cost
def weights_trans(weights) :
wtrans = (scipy.sin(weights)+1.0)/2.0
return wtrans
def weights_inv_trans(transweights) :
w = scipy.arcsin(2.0*transweights-1.0)
return w
def minimize_weight_cost(weights,chemnames,sensarray_design,logprior=1.0e20) :
"""
weights : a vector of positive numbers with length the same as the number of
rows of sensarray_design. The weights should sum to 1
chemnames: a list of unmeasurable chemical names over which we wish
to design experiments
sensarray_design: an array of sensitivities of measurable chemicals
or just an array of sensitivity vectors, each row a different
sensitivity vector
logprior : prior on parameters. logprior = log(1000.0) allows parameters
to fluctuate by a factor of 1000 """
weights_trans = scipy.arcsin(2.0*weights-1.0)
w = scipy.optimize.fmin(weight_cost,weights_trans,maxiter = 10000,
args=(chemnames,sensarray_design,logprior))
woptnotnormed = (scipy.sin(w)+1.0)/2.0
wopt = woptnotnormed/scipy.sum(woptnotnormed)
return woptnotnormed,wopt
def plot_variances(chemnames,logprior,scale=1.0,return_var = False) :
"""
chemnames: list of chemical names
logprior: prior on params. logprior = log(1000.0) means parameters
allowed to fluctuate by a factor of 1000 """
times, bestfit, var = variances(chemnames,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,bestfit[key]/scale)
Plotting.hold(True)
Plotting.plot(times,bestfit[key]/scale + scipy.sqrt(var[key])/scale,'r--')
Plotting.plot(times,bestfit[key]/scale - scipy.sqrt(var[key])/scale,'r--')
Plotting.title(key,fontsize=16)
Plotting.xlabel('time (minutes)',fontsize=16)
Plotting.ylabel('number of molecules',fontsize=16)
xtics = Plotting.gca().get_xticklabels()
ytics = Plotting.gca().get_yticklabels()
Plotting.setp(xtics,size=16)
Plotting.setp(ytics,size=16)
Plotting.show()
if return_var :
return times, bestfit, var
def plot_variances_log_chems(chemnames,logprior) :
"""
chemnames: list of chemical names
logprior: prior on params
Plots the standard deviation of the chemicals when the variance
is computed using logs of the chemical trajectories. This
makes sure the final plots do not have best_fit+-stddev that
do not become negative """
times, bestfit, var = variances_log_chems(chemnames,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,bestfit[key])
Plotting.hold(True)
Plotting.plot(times,bestfit[key]*scipy.exp(scipy.sqrt(var[key])),'r-')
Plotting.plot(times,bestfit[key]*scipy.exp(-scipy.sqrt(var[key])),'r-')
Plotting.title(key,fontsize=14)
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
Plotting.show()
def plot_variance_newpoint(chemnames,sensvect_design,logprior=1.0e20,
return_data = True) :
"""
chemnames: list of chemical names
    sensvect_design: a sensitivity vector of a quantity that is
measurable
This will plot the old and new variances of the chemicals in
chemnames, given a new measurement that has sensitivity vector
sensvect_design
"""
times,bestfit,var = variances(chemnames,logprior)
times,varchange = variance_change(chemnames,sensvect_design,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,bestfit[key])
Plotting.hold(True)
Plotting.plot(times,bestfit[key] + scipy.sqrt(var[key]),'r-')
Plotting.plot(times,bestfit[key] - scipy.sqrt(var[key]),'r-')
Plotting.plot(times,bestfit[key] + scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.plot(times,bestfit[key] - scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.title(key,fontsize=14)
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
if return_data :
newvar = {}
for ky in var.keys() :
            newvar[ky] = var[ky] + varchange[ky]
return times,bestfit,newvar
def plot_variance_newweights(weights,chemnames,sensarray_design,logprior=1.0e20,scale=1.0,return_data = True) :
"""
weights : a proposed set of weights for each of the row vectors in
sensarray_design
chemnames : a list of chemicals for which we will plot the variance
logprior : as before
This will plot the old and new variances on chemnames, similar to
above.
NOTE: the weights that are passed in do not necessarily have to sum to
one. e.g. if the weights are normalized such that max(weights) = 1, then
by scaling all the weights by 1/sigma, you are then assuming that
the most accurate measurement has an error of size sigma. sigma for
example could be 20% of the maximum value of a trajectory.
"""
times,bestfit,var = variances(chemnames,logprior)
times,varchange = var_change_weighted(weights,chemnames,sensarray_design,logprior)
for key in bestfit.keys() :
Plotting.figure()
Plotting.plot(times,scale*bestfit[key])
Plotting.hold(True)
Plotting.plot(times,scale*bestfit[key] + scale*scipy.sqrt(var[key]),'r-')
Plotting.plot(times,scale*bestfit[key] - scale*scipy.sqrt(var[key]),'r-')
Plotting.plot(times,scale*bestfit[key] + scale*scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.plot(times,scale*bestfit[key] - scale*scipy.sqrt(var[key]+varchange[key]),'k--')
Plotting.title(key,fontsize=14)
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
Plotting.axis([0.0,40.0,-.01,1.2e4])
Plotting.show()
if return_data :
newvar = {}
for ky in var.keys() :
            newvar[ky] = var[ky] + varchange[ky]
return times,bestfit,newvar
def plot_variances_subplot(chemnames,logprior) :
times, bestfit, var = variances(chemnames,logprior)
nallplots = len(chemnames)
nfigs = nallplots/9
for figno in range(1,nfigs+1) :
Plotting.figure()
for i in range(0,9) :
Plotting.subplot(3,3,i+1)
chemind = i+(figno-1)*9
Plotting.plot(times,bestfit[chemnames[chemind]])
Plotting.hold(True)
Plotting.plot(times,bestfit[chemnames[chemind]]
+ scipy.sqrt(var[chemnames[chemind]]),'r-')
Plotting.plot(times,bestfit[chemnames[chemind]]
- scipy.sqrt(var[chemnames[chemind]]),'r-')
yt = Plotting.yticks()
Plotting.axis([0,100.0,yt[0],yt[-1]])
Plotting.title(chemnames[chemind])
Plotting.xlabel('time')
Plotting.ylabel('arb. units')
xt = Plotting.xticks()
Plotting.xticks([xt[0],xt[-1]])
        Plotting.savefig('./figs/variance_wt_'+figno.__str__()+'.ps')
Plotting.show()
def reduce_size(array,skipsize) :
""" reduce_size takes an array of dimension m,n and
    returns an array containing every skipsize-th row.
"""
size = array.shape
newsize = len(scipy.arange(0,size[0],skipsize))
if len(size) == 1 :
newvect = scipy.zeros((newsize,),scipy.float_)
for iind,i in enumerate(scipy.arange(0,size[0],skipsize)) :
newvect[iind] = array[i]
return newvect
elif len(size) == 2 :
newarray = scipy.zeros((newsize,size[1]),scipy.float_)
for iind,i in enumerate(scipy.arange(0,size[0],skipsize)) :
newarray[iind] = array[i]
return newarray
| false
| true
|
7906b78142c8c770febd3d20161bc5a68a398b79
| 1,463
|
py
|
Python
|
byteslib/byteslib.py
|
MaxTurchin/pycopy-lib
|
d7a69fc2a28031e2ca475c29239f715c1809d8cc
|
[
"PSF-2.0"
] | null | null | null |
byteslib/byteslib.py
|
MaxTurchin/pycopy-lib
|
d7a69fc2a28031e2ca475c29239f715c1809d8cc
|
[
"PSF-2.0"
] | null | null | null |
byteslib/byteslib.py
|
MaxTurchin/pycopy-lib
|
d7a69fc2a28031e2ca475c29239f715c1809d8cc
|
[
"PSF-2.0"
] | null | null | null |
# This file is part of the standard library of Pycopy project, minimalist
# and lightweight Python implementation.
#
# https://github.com/pfalcon/pycopy
# https://github.com/pfalcon/pycopy-lib
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Paul Sokolovsky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import ubinascii
def hex(s):
s = ubinascii.hexlify(s)
s.__class__ = str
return s
def fromhex(s):
return ubinascii.unhexlify(s)
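# Editor's note: illustrative usage only, not part of the original module.
# Under Pycopy the round trip looks like:
#
#   hex(b"\x01\xff")    # -> '01ff'
#   fromhex("01ff")     # -> b'\x01\xff'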
| 36.575
| 79
| 0.762816
|
import ubinascii
def hex(s):
s = ubinascii.hexlify(s)
s.__class__ = str
return s
def fromhex(s):
return ubinascii.unhexlify(s)
| true
| true
|
7906b7a30acc043e9f8e405399c3c6e7f070657b
| 6,101
|
py
|
Python
|
cogdl/datasets/gtn_data.py
|
YeWR/cogdl
|
5be13cda808c44333f7059db11d13a1d0f190ffa
|
[
"MIT"
] | 1
|
2020-07-20T07:14:50.000Z
|
2020-07-20T07:14:50.000Z
|
cogdl/datasets/gtn_data.py
|
YeWR/cogdl
|
5be13cda808c44333f7059db11d13a1d0f190ffa
|
[
"MIT"
] | null | null | null |
cogdl/datasets/gtn_data.py
|
YeWR/cogdl
|
5be13cda808c44333f7059db11d13a1d0f190ffa
|
[
"MIT"
] | 1
|
2021-06-17T02:44:09.000Z
|
2021-06-17T02:44:09.000Z
|
import sys
import time
import os
import os.path as osp
import requests
import shutil
import tqdm
import pickle
import numpy as np
import torch
from cogdl.data import Data, Dataset, download_url
from . import register_dataset
def untar(path, fname, deleteTar=True):
"""
Unpacks the given archive file to the same directory, then (by default)
deletes the archive file.
"""
print('unpacking ' + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
if deleteTar:
os.remove(fullpath)
class GTNDataset(Dataset):
r"""The network datasets "ACM", "DBLP" and "IMDB" from the
`"Graph Transformer Networks"
<https://arxiv.org/abs/1911.06455>`_ paper.
Args:
root (string): Root directory where the dataset should be saved.
name (string): The name of the dataset (:obj:`"gtn-acm"`,
:obj:`"gtn-dblp"`, :obj:`"gtn-imdb"`).
"""
def __init__(self, root, name):
self.name = name
self.url = f'https://github.com/cenyk1230/gtn-data/blob/master/{name}.zip?raw=true'
super(GTNDataset, self).__init__(root)
self.data = torch.load(self.processed_paths[0])
self.num_classes = torch.max(self.data.train_target).item() + 1
self.num_edge = len(self.data.adj)
self.num_nodes = self.data.x.shape[0]
@property
def raw_file_names(self):
names = ["edges.pkl", "labels.pkl", "node_features.pkl"]
return names
@property
def processed_file_names(self):
return ["data.pt"]
def read_gtn_data(self, folder):
edges = pickle.load(open(osp.join(folder, 'edges.pkl'), 'rb'))
labels = pickle.load(open(osp.join(folder, 'labels.pkl'), 'rb'))
node_features = pickle.load(open(osp.join(folder, 'node_features.pkl'), 'rb'))
data = Data()
data.x = torch.from_numpy(node_features).type(torch.FloatTensor)
num_nodes = edges[0].shape[0]
node_type = np.zeros((num_nodes), dtype=int)
assert len(edges)==4
assert len(edges[0].nonzero())==2
node_type[edges[0].nonzero()[0]] = 0
node_type[edges[0].nonzero()[1]] = 1
node_type[edges[1].nonzero()[0]] = 1
node_type[edges[1].nonzero()[1]] = 0
node_type[edges[2].nonzero()[0]] = 0
node_type[edges[2].nonzero()[1]] = 2
node_type[edges[3].nonzero()[0]] = 2
node_type[edges[3].nonzero()[1]] = 0
print(node_type)
data.pos = torch.from_numpy(node_type)
edge_list = []
for i, edge in enumerate(edges):
edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
edge_list.append(edge_tmp)
data.edge_index = torch.cat(edge_list, 1)
A = []
for i,edge in enumerate(edges):
edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
value_tmp = torch.ones(edge_tmp.shape[1]).type(torch.FloatTensor)
A.append((edge_tmp,value_tmp))
edge_tmp = torch.stack((torch.arange(0,num_nodes),torch.arange(0,num_nodes))).type(torch.LongTensor)
value_tmp = torch.ones(num_nodes).type(torch.FloatTensor)
A.append((edge_tmp,value_tmp))
data.adj = A
data.train_node = torch.from_numpy(np.array(labels[0])[:,0]).type(torch.LongTensor)
data.train_target = torch.from_numpy(np.array(labels[0])[:,1]).type(torch.LongTensor)
data.valid_node = torch.from_numpy(np.array(labels[1])[:,0]).type(torch.LongTensor)
data.valid_target = torch.from_numpy(np.array(labels[1])[:,1]).type(torch.LongTensor)
data.test_node = torch.from_numpy(np.array(labels[2])[:,0]).type(torch.LongTensor)
data.test_target = torch.from_numpy(np.array(labels[2])[:,1]).type(torch.LongTensor)
y = np.zeros((num_nodes), dtype=int)
x_index = torch.cat((data.train_node, data.valid_node, data.test_node))
y_index = torch.cat((data.train_target, data.valid_target, data.test_target))
y[x_index.numpy()] = y_index.numpy()
data.y = torch.from_numpy(y)
self.data = data
def get(self, idx):
assert idx == 0
return self.data
def apply_to_device(self, device):
self.data.x = self.data.x.to(device)
self.data.train_node = self.data.train_node.to(device)
self.data.valid_node = self.data.valid_node.to(device)
self.data.test_node = self.data.test_node.to(device)
self.data.train_target = self.data.train_target.to(device)
self.data.valid_target = self.data.valid_target.to(device)
self.data.test_target = self.data.test_target.to(device)
new_adj = []
for (t1, t2) in self.data.adj:
new_adj.append((t1.to(device), t2.to(device)))
self.data.adj = new_adj
def download(self):
download_url(self.url, self.raw_dir, name=self.name + '.zip')
untar(self.raw_dir, self.name + '.zip')
def process(self):
self.read_gtn_data(self.raw_dir)
torch.save(self.data, self.processed_paths[0])
def __repr__(self):
return "{}()".format(self.name)
@register_dataset("gtn-acm")
class ACM_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-acm"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(ACM_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-dblp")
class DBLP_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-dblp"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(DBLP_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-imdb")
class IMDB_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-imdb"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(IMDB_GTNDataset, self).__init__(path, dataset)
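# Editor's note: illustrative usage only, not part of the original module; the
# exact download/processing behaviour is inherited from cogdl's Dataset class.
#
#   dataset = ACM_GTNDataset()          # fetches and unpacks gtn-acm on first use
#   data = dataset.get(0)
#   print(data.x.shape, dataset.num_classes, dataset.num_edge)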
| 36.100592
| 113
| 0.626946
|
import sys
import time
import os
import os.path as osp
import requests
import shutil
import tqdm
import pickle
import numpy as np
import torch
from cogdl.data import Data, Dataset, download_url
from . import register_dataset
def untar(path, fname, deleteTar=True):
print('unpacking ' + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
if deleteTar:
os.remove(fullpath)
class GTNDataset(Dataset):
def __init__(self, root, name):
self.name = name
self.url = f'https://github.com/cenyk1230/gtn-data/blob/master/{name}.zip?raw=true'
super(GTNDataset, self).__init__(root)
self.data = torch.load(self.processed_paths[0])
self.num_classes = torch.max(self.data.train_target).item() + 1
self.num_edge = len(self.data.adj)
self.num_nodes = self.data.x.shape[0]
@property
def raw_file_names(self):
names = ["edges.pkl", "labels.pkl", "node_features.pkl"]
return names
@property
def processed_file_names(self):
return ["data.pt"]
def read_gtn_data(self, folder):
edges = pickle.load(open(osp.join(folder, 'edges.pkl'), 'rb'))
labels = pickle.load(open(osp.join(folder, 'labels.pkl'), 'rb'))
node_features = pickle.load(open(osp.join(folder, 'node_features.pkl'), 'rb'))
data = Data()
data.x = torch.from_numpy(node_features).type(torch.FloatTensor)
num_nodes = edges[0].shape[0]
node_type = np.zeros((num_nodes), dtype=int)
assert len(edges)==4
assert len(edges[0].nonzero())==2
node_type[edges[0].nonzero()[0]] = 0
node_type[edges[0].nonzero()[1]] = 1
node_type[edges[1].nonzero()[0]] = 1
node_type[edges[1].nonzero()[1]] = 0
node_type[edges[2].nonzero()[0]] = 0
node_type[edges[2].nonzero()[1]] = 2
node_type[edges[3].nonzero()[0]] = 2
node_type[edges[3].nonzero()[1]] = 0
print(node_type)
data.pos = torch.from_numpy(node_type)
edge_list = []
for i, edge in enumerate(edges):
edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
edge_list.append(edge_tmp)
data.edge_index = torch.cat(edge_list, 1)
A = []
for i,edge in enumerate(edges):
edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
value_tmp = torch.ones(edge_tmp.shape[1]).type(torch.FloatTensor)
A.append((edge_tmp,value_tmp))
edge_tmp = torch.stack((torch.arange(0,num_nodes),torch.arange(0,num_nodes))).type(torch.LongTensor)
value_tmp = torch.ones(num_nodes).type(torch.FloatTensor)
A.append((edge_tmp,value_tmp))
data.adj = A
data.train_node = torch.from_numpy(np.array(labels[0])[:,0]).type(torch.LongTensor)
data.train_target = torch.from_numpy(np.array(labels[0])[:,1]).type(torch.LongTensor)
data.valid_node = torch.from_numpy(np.array(labels[1])[:,0]).type(torch.LongTensor)
data.valid_target = torch.from_numpy(np.array(labels[1])[:,1]).type(torch.LongTensor)
data.test_node = torch.from_numpy(np.array(labels[2])[:,0]).type(torch.LongTensor)
data.test_target = torch.from_numpy(np.array(labels[2])[:,1]).type(torch.LongTensor)
y = np.zeros((num_nodes), dtype=int)
x_index = torch.cat((data.train_node, data.valid_node, data.test_node))
y_index = torch.cat((data.train_target, data.valid_target, data.test_target))
y[x_index.numpy()] = y_index.numpy()
data.y = torch.from_numpy(y)
self.data = data
def get(self, idx):
assert idx == 0
return self.data
def apply_to_device(self, device):
self.data.x = self.data.x.to(device)
self.data.train_node = self.data.train_node.to(device)
self.data.valid_node = self.data.valid_node.to(device)
self.data.test_node = self.data.test_node.to(device)
self.data.train_target = self.data.train_target.to(device)
self.data.valid_target = self.data.valid_target.to(device)
self.data.test_target = self.data.test_target.to(device)
new_adj = []
for (t1, t2) in self.data.adj:
new_adj.append((t1.to(device), t2.to(device)))
self.data.adj = new_adj
def download(self):
download_url(self.url, self.raw_dir, name=self.name + '.zip')
untar(self.raw_dir, self.name + '.zip')
def process(self):
self.read_gtn_data(self.raw_dir)
torch.save(self.data, self.processed_paths[0])
def __repr__(self):
return "{}()".format(self.name)
@register_dataset("gtn-acm")
class ACM_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-acm"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(ACM_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-dblp")
class DBLP_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-dblp"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(DBLP_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-imdb")
class IMDB_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-imdb"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(IMDB_GTNDataset, self).__init__(path, dataset)
| true
| true
|
7906b826eedb6c679be883125d7818f139c7bc16
| 2,365
|
py
|
Python
|
jedeschule/spiders/brandenburg.py
|
MartinGer/jedeschule-scraper
|
107a3f5c907c5e1b232813a31bfdea90586e9424
|
[
"MIT"
] | 1
|
2021-11-07T08:28:32.000Z
|
2021-11-07T08:28:32.000Z
|
jedeschule/spiders/brandenburg.py
|
canbuffi/jedeschule-scraper
|
ec3c23d9e90a2bc65786fdc8b3ba0951b82c343a
|
[
"MIT"
] | null | null | null |
jedeschule/spiders/brandenburg.py
|
canbuffi/jedeschule-scraper
|
ec3c23d9e90a2bc65786fdc8b3ba0951b82c343a
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
import scrapy
from scrapy import Item
from jedeschule.items import School
from jedeschule.spiders.school_spider import SchoolSpider
def first_or_none(item: List) -> Optional[str]:
try:
return item[0]
except IndexError:
return None
class BrandenburgSpider(SchoolSpider):
name = "brandenburg"
start_urls = ['https://bildung-brandenburg.de/schulportraets/index.php?id=uebersicht']
def parse(self, response):
for link in response.xpath('/html/body/div/div[5]/div[2]/div/div[2]/table/tbody/tr/td/a/@href').getall():
yield scrapy.Request(response.urljoin(link), callback=self.parse_details)
def parse_details(self, response):
table = response.xpath('//*[@id="c"]/div/table')
data = {
# extract the school ID from the URL
'id': response.url.rsplit('=', 1)[1],
'data_url': response.url
}
for tr in table.css('tr:not(:first-child)'):
key = tr.css('th ::text').get().replace(':', '').strip()
value = tr.css('td ::text').getall()
data[key] = [self.fix_data(part) for part in value]
yield data
def fix_data(self, string):
"""
fix wrong tabs, spaces and backslashes
fix @ in email addresses
"""
if string is None:
return None
string = ' '.join(string.split())
return string.replace('\\', '').replace('|at|','@').strip()
@staticmethod
def normalize(item: Item) -> School:
*name, street, place = item.get('Adresse')
zip_code, *city_parts = place.split(" ")
return School(name=' '.join(name),
id='BB-{}'.format(item.get('id')),
address=street,
zip=zip_code,
city=' '.join(city_parts),
website=first_or_none(item.get('Internet')),
email=first_or_none(item.get('E-Mail')),
school_type=first_or_none(item.get('Schulform')),
provider=first_or_none(item.get('Schulamt')),
fax=first_or_none(item.get('Fax')),
phone=first_or_none(item.get('Telefon')),
director=first_or_none(item.get('Schulleiter/in')))
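# Editor's note (illustrative only, not part of the original spider): fix_data
# collapses whitespace and de-obfuscates e-mail addresses, e.g.
# fix_data('info|at|schule.de') -> 'info@schule.de'.  The spider is normally
# run through Scrapy's CLI, e.g. `scrapy crawl brandenburg`.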
| 36.953125
| 113
| 0.556871
|
from typing import List, Optional
import scrapy
from scrapy import Item
from jedeschule.items import School
from jedeschule.spiders.school_spider import SchoolSpider
def first_or_none(item: List) -> Optional[str]:
try:
return item[0]
except IndexError:
return None
class BrandenburgSpider(SchoolSpider):
name = "brandenburg"
start_urls = ['https://bildung-brandenburg.de/schulportraets/index.php?id=uebersicht']
def parse(self, response):
for link in response.xpath('/html/body/div/div[5]/div[2]/div/div[2]/table/tbody/tr/td/a/@href').getall():
yield scrapy.Request(response.urljoin(link), callback=self.parse_details)
def parse_details(self, response):
table = response.xpath('//*[@id="c"]/div/table')
data = {
'id': response.url.rsplit('=', 1)[1],
'data_url': response.url
}
for tr in table.css('tr:not(:first-child)'):
key = tr.css('th ::text').get().replace(':', '').strip()
value = tr.css('td ::text').getall()
data[key] = [self.fix_data(part) for part in value]
yield data
def fix_data(self, string):
if string is None:
return None
string = ' '.join(string.split())
return string.replace('\\', '').replace('|at|','@').strip()
@staticmethod
def normalize(item: Item) -> School:
*name, street, place = item.get('Adresse')
zip_code, *city_parts = place.split(" ")
return School(name=' '.join(name),
id='BB-{}'.format(item.get('id')),
address=street,
zip=zip_code,
city=' '.join(city_parts),
website=first_or_none(item.get('Internet')),
email=first_or_none(item.get('E-Mail')),
school_type=first_or_none(item.get('Schulform')),
provider=first_or_none(item.get('Schulamt')),
fax=first_or_none(item.get('Fax')),
phone=first_or_none(item.get('Telefon')),
director=first_or_none(item.get('Schulleiter/in')))
| true
| true
|
7906ba67702c082084572deb9f733367c5f26f5d
| 1,964
|
py
|
Python
|
check_kubernetes_health.py
|
adolci/nagios-plugins
|
0d8cee0376467922b3315e9b0e08b98454eb9853
|
[
"IBM-pibs",
"Apache-1.1"
] | null | null | null |
check_kubernetes_health.py
|
adolci/nagios-plugins
|
0d8cee0376467922b3315e9b0e08b98454eb9853
|
[
"IBM-pibs",
"Apache-1.1"
] | null | null | null |
check_kubernetes_health.py
|
adolci/nagios-plugins
|
0d8cee0376467922b3315e9b0e08b98454eb9853
|
[
"IBM-pibs",
"Apache-1.1"
] | 3
|
2019-07-25T11:46:32.000Z
|
2019-12-17T05:01:03.000Z
|
#!/usr/bin/env python
# coding=utf-8
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2019-02-26 18:30:53 +0000 (Tue, 26 Feb 2019)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check the health status of Kubernetes via its API
Tested on Kubernetes 1.13
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import traceback
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon import RestNagiosPlugin
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.1'
class CheckKubernetesHealth(RestNagiosPlugin):
def __init__(self):
# Python 2.x
super(CheckKubernetesHealth, self).__init__()
# Python 3.x
# super().__init__()
self.name = 'Kubernetes API'
self.default_port = 8001
# or just /healthz
self.path = '/healthz/ping'
self.auth = 'optional'
self.json = False
self.msg = 'Kubernetes msg not defined yet'
#def add_options(self):
# super(CheckKubernetesHealth, self).add_options()
def process_options(self):
super(CheckKubernetesHealth, self).process_options()
self.no_args()
def parse(self, req):
content = req.content
if content != 'ok':
self.critical()
self.msg = "Kubernetes health = '{}'".format(content)
if __name__ == '__main__':
CheckKubernetesHealth().main()
| 24.860759
| 77
| 0.678717
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import traceback
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
from harisekhon import RestNagiosPlugin
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.1'
class CheckKubernetesHealth(RestNagiosPlugin):
def __init__(self):
super(CheckKubernetesHealth, self).__init__()
self.name = 'Kubernetes API'
self.default_port = 8001
self.path = '/healthz/ping'
self.auth = 'optional'
self.json = False
self.msg = 'Kubernetes msg not defined yet'
def process_options(self):
super(CheckKubernetesHealth, self).process_options()
self.no_args()
def parse(self, req):
content = req.content
if content != 'ok':
self.critical()
self.msg = "Kubernetes health = '{}'".format(content)
if __name__ == '__main__':
CheckKubernetesHealth().main()
| true
| true
|
7906badac10c4f17011c51caf1b4ac03048d0a89
| 4,102
|
py
|
Python
|
my_cv/utils/cv2_util.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | 2
|
2020-04-06T09:09:19.000Z
|
2020-07-24T03:59:55.000Z
|
my_cv/utils/cv2_util.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | null | null | null |
my_cv/utils/cv2_util.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from PIL import Image
def draw_approx_polyDP(cnt, epsilon=0.01, closed=True):
"""用多边形来近似的表示曲线"""
epsilon = epsilon * cv2.arcLength(cnt, closed) # 得到轮廓的周长信息作为参考值
return cv2.approxPolyDP(cnt, epsilon, closed) # 得到近似多边形框
def draw_convex_hull(cnt):
"""画凸包,传入的是一些点"""
return cv2.convexHull(cnt) # 获取处理过的轮廓信息
def show_img(file_name, window_name='win'):
img = cv2.imread(file_name)
cv2.imshow(window_name, img)
    # press any key to close the image
cv2.waitKey()
cv2.destroyAllWindows()
def camera_show(window_name='camera'):
"""最好在改进一下关闭窗口部分的功能
建立一个窗口捕捉摄像头显示的内容
当左键点击过窗口,且按过任意键盘键,才会退出窗口"""
clicked = False
camera_capture = cv2.VideoCapture(0)
def on_mouse(event, x, y, flags, param):
        nonlocal clicked  # refers to the flag defined in camera_show
if event == cv2.EVENT_LBUTTONUP:
clicked = True
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, on_mouse)
success, frame = camera_capture.read()
    # cv2.waitKey(1): the argument is how many milliseconds to wait for a key press; a return value of -1 means no key was pressed
while success and cv2.waitKey(1) == -1 and not clicked:
cv2.imshow(window_name, frame)
success, frame = camera_capture.read()
cv2.destroyAllWindows()
camera_capture.release()
def camera_save(file_name, seconds=3, fps=60):
    # open the capture device
camera_capture = cv2.VideoCapture(0)
size = (int(camera_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(camera_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
video_writer = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, size)
success, frame = camera_capture.read()
num_frames_remaining = seconds * fps - 1
while success and num_frames_remaining > 0:
video_writer.write(frame)
success, frame = camera_capture.read()
num_frames_remaining -= 1
camera_capture.release()
def copy(orig_img, start_height, start_width, part):
height, width = part.shape
orig_img[start_height: start_height + height, start_width: start_width + width] = part
return orig_img
def draw_gray_random(height, width):
flat_numpy_array = np.random.randint(0, 256, height * width)
gray_image = flat_numpy_array.reshape(height, width)
return gray_image
def draw_random(height, width, channel=3):
flat_numpy_array = np.random.randint(0, 256, height * width * channel)
bgr_image = flat_numpy_array.reshape((height, width, channel))
return bgr_image
def draw_gray_black(height, width):
img = np.zeros((height, width), dtype=np.uint8)
return img
def draw_line(img, x1, y1, x2, y2, color=(0, 255, 0), thickness=2):
return cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def draw_rectangle(img, box, contour_idx=0, color=(0, 0, 255), thickness=3):
return cv2.drawContours(img, box, contour_idx, color, thickness)
def draw_cicile(img, center, radius, color=(0, 255, 0), thickness=2):
return cv2.circle(img, center, radius, color, thickness)
def draw_black(height, width):
    img = draw_gray_black(height, width)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return img
def img2array(img):
return bytearray(img)
def array_img(arr, height, width, channel=3):
return np.array(arr).reshape(height, width, channel)
def array2img_gray(arr, height, width):
return np.array(arr).reshape(height, width)
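# Editor's note: illustrative round trip for the helpers above, not part of
# the original module (_bytes_roundtrip_demo is a hypothetical helper name).
def _bytes_roundtrip_demo():
    img = draw_gray_black(4, 5)                     # 4x5 all-black uint8 image
    rebuilt = array2img_gray(img2array(img), 4, 5)  # flatten to bytes and rebuild
    return (rebuilt == img).all()                   # should be True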
if __name__ == '__main__':
img = cv2.imread('sphere.png')
cv2.imshow('win', img)
# empire = Image.open('sphere.png')
# cv2.waitKey()
# cv2.destroyAllWindows()
# print(empire.shape())
# empire.convert('RGB')
# print(empire.mode)
# print(empire.shape())
img = Image.open('sphere.png')
img = img.resize((137, 137))
    # make the black pixels transparent
print(img.info)
print(img.mode)
img = img.convert("RGBA")
print(img.mode)
width = img.size[0]
height = img.size[1]
for x in range(width):
for y in range(height):
r, g, b, a = img.getpixel((x, y))
rgba = (r, g, b, a)
if (r == g == b == 0):
img.putpixel((x, y), (0, 0, 0, 0))
img.save('sphere_2.png')
img.show()
| 27.904762
| 100
| 0.661628
|
import cv2
import numpy as np
from PIL import Image
def draw_approx_polyDP(cnt, epsilon=0.01, closed=True):
epsilon = epsilon * cv2.arcLength(cnt, closed)
return cv2.approxPolyDP(cnt, epsilon, closed)
def draw_convex_hull(cnt):
return cv2.convexHull(cnt)
def show_img(file_name, window_name='win'):
img = cv2.imread(file_name)
cv2.imshow(window_name, img)
cv2.waitKey()
cv2.destroyAllWindows()
def camera_show(window_name='camera'):
clicked = False
camera_capture = cv2.VideoCapture(0)
def on_mouse(event, x, y, flags, param):
        nonlocal clicked
if event == cv2.EVENT_LBUTTONUP:
clicked = True
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, on_mouse)
success, frame = camera_capture.read()
while success and cv2.waitKey(1) == -1 and not clicked:
cv2.imshow(window_name, frame)
success, frame = camera_capture.read()
cv2.destroyAllWindows()
camera_capture.release()
def camera_save(file_name, seconds=3, fps=60):
camera_capture = cv2.VideoCapture(0)
size = (int(camera_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(camera_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
video_writer = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, size)
success, frame = camera_capture.read()
num_frames_remaining = seconds * fps - 1
while success and num_frames_remaining > 0:
video_writer.write(frame)
success, frame = camera_capture.read()
num_frames_remaining -= 1
camera_capture.release()
def copy(orig_img, start_height, start_width, part):
height, width = part.shape
orig_img[start_height: start_height + height, start_width: start_width + width] = part
return orig_img
def draw_gray_random(height, width):
flat_numpy_array = np.random.randint(0, 256, height * width)
gray_image = flat_numpy_array.reshape(height, width)
return gray_image
def draw_random(height, width, channel=3):
flat_numpy_array = np.random.randint(0, 256, height * width * channel)
bgr_image = flat_numpy_array.reshape((height, width, channel))
return bgr_image
def draw_gray_black(height, width):
img = np.zeros((height, width), dtype=np.uint8)
return img
def draw_line(img, x1, y1, x2, y2, color=(0, 255, 0), thickness=2):
return cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def draw_rectangle(img, box, contour_idx=0, color=(0, 0, 255), thickness=3):
return cv2.drawContours(img, box, contour_idx, color, thickness)
def draw_cicile(img, center, radius, color=(0, 255, 0), thickness=2):
return cv2.circle(img, center, radius, color, thickness)
def draw_black(height, width):
    img = draw_gray_black(height, width)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return img
def img2array(img):
return bytearray(img)
def array_img(arr, height, width, channel=3):
return np.array(arr).reshape(height, width, channel)
def array2img_gray(arr, height, width):
return np.array(arr).reshape(height, width)
if __name__ == '__main__':
img = cv2.imread('sphere.png')
cv2.imshow('win', img)
img = Image.open('sphere.png')
img = img.resize((137, 137))
print(img.info)
print(img.mode)
img = img.convert("RGBA")
print(img.mode)
width = img.size[0]
height = img.size[1]
for x in range(width):
for y in range(height):
r, g, b, a = img.getpixel((x, y))
rgba = (r, g, b, a)
if (r == g == b == 0):
img.putpixel((x, y), (0, 0, 0, 0))
img.save('sphere_2.png')
img.show()
| true
| true
|
7906bc27a9fc98555f24a38f4ddded576b827768
| 8,569
|
py
|
Python
|
ginga/util/io_fits.py
|
Rbeaty88/ginga
|
08451a81288b8defc54aa9f9e2af23a9ba32e985
|
[
"BSD-3-Clause"
] | 1
|
2016-03-21T15:56:15.000Z
|
2016-03-21T15:56:15.000Z
|
ginga/util/io_fits.py
|
Rbeaty88/ginga
|
08451a81288b8defc54aa9f9e2af23a9ba32e985
|
[
"BSD-3-Clause"
] | null | null | null |
ginga/util/io_fits.py
|
Rbeaty88/ginga
|
08451a81288b8defc54aa9f9e2af23a9ba32e985
|
[
"BSD-3-Clause"
] | null | null | null |
#
# io_fits.py -- Module wrapper for loading FITS files.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
There are two possible choices for a python FITS file reading package
compatible with Ginga: astropy/pyfits and fitsio. Both are based on
the CFITSIO library, although it seems that astropy's version has
changed quite a bit from the original, while fitsio is still tracking
the current version.
To force the use of one, do:
from ginga.util import io_fits
io_fits.use('package')
(replace 'package' with one of {'astropy', 'fitsio'}) before you load
any images. Otherwise Ginga will try to pick one for you.
"""
import numpy
fits_configured = False
fitsLoaderClass = None
have_pyfits = False
have_fitsio = False
class FITSError(Exception):
pass
def use(fitspkg, raise_err=True):
global fits_configured, fitsLoaderClass, \
have_pyfits, pyfits, \
have_fitsio, fitsio
if fitspkg == 'astropy':
try:
from astropy.io import fits as pyfits
have_pyfits = True
fitsLoaderClass = PyFitsFileHandler
return True
except ImportError:
try:
# maybe they have a standalone version of pyfits?
import pyfits
have_pyfits = True
fitsLoaderClass = PyFitsFileHandler
return True
except ImportError as e:
if raise_err:
raise
return False
elif fitspkg == 'fitsio':
try:
import fitsio
have_fitsio = True
fitsLoaderClass = FitsioFileHandler
return True
except ImportError as e:
if raise_err:
raise
return False
return False
class BaseFitsFileHandler(object):
pass
class PyFitsFileHandler(BaseFitsFileHandler):
def __init__(self, logger):
super(PyFitsFileHandler, self).__init__()
if not have_pyfits:
raise FITSError("Need astropy or pyfits module installed to use this file handler")
self.logger = logger
self.kind = 'pyfits'
def fromHDU(self, hdu, ahdr):
header = hdu.header
if hasattr(header, 'cards'):
#newer astropy.io.fits don't have ascardlist
for card in header.cards:
bnch = ahdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
else:
for card in header.ascardlist():
bnch = ahdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
data = hdu.data
if len(data.shape) < 2:
# Expand 1D arrays into 1xN array
data = data.reshape((1, data.shape[0]))
else:
# Drill down to 2D data slice
if not naxispath:
naxispath = ([0] * (len(data.shape)-2))
for idx in naxispath:
data = data[idx]
self.fromHDU(hdu, ahdr)
return (data, naxispath)
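    # Note on naxispath (added for clarity): for data with more than two axes, it
    # lists the indices used above to drill down to a 2D slice.  For example, a
    # hypothetical cube of shape (5, 100, 100) with naxispath=[2] yields the third
    # 100x100 plane; when omitted, index 0 is taken along every extra axis.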
def load_file(self, filespec, ahdr, numhdu=None, naxispath=None):
filepath = get_path(filespec)
self.logger.info("Loading file '%s' ..." % (filepath))
fits_f = pyfits.open(filepath, 'readonly')
# this seems to be necessary now for some fits files...
try:
fits_f.verify('fix')
        except Exception as e:
            raise FITSError("Error loading fits file '%s': %s" % (
                filepath, str(e)))
if numhdu == None:
found_valid_hdu = False
for i in range(len(fits_f)):
hdu = fits_f[i]
                if hdu.data is None:
# compressed FITS file or non-pixel data hdu?
continue
if not isinstance(hdu.data, numpy.ndarray):
# We need to open a numpy array
continue
#print "data type is %s" % hdu.data.dtype.kind
# Looks good, let's try it
found_valid_hdu = True
break
if not found_valid_hdu:
raise FITSError("No data HDU found that Ginga can open in '%s'" % (
filepath))
else:
hdu = fits_f[numhdu]
data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
naxispath=naxispath)
fits_f.close()
return (data, naxispath)
def create_fits(self, data, header):
fits_f = pyfits.HDUList()
hdu = pyfits.PrimaryHDU()
hdu.data = data
for kwd in header.keys():
card = header.get_card(kwd)
hdu.header.update(card.key, card.value, comment=card.comment)
fits_f.append(hdu)
return fits_f
def write_fits(self, path, data, header, **kwdargs):
fits_f = self.create_fits(data, header)
fits_f.writeto(path, **kwdargs)
fits_f.close()
def save_as_file(self, path, data, header, **kwdargs):
        self.write_fits(path, data, header, **kwdargs)
class FitsioFileHandler(BaseFitsFileHandler):
def __init__(self, logger):
super(FitsioFileHandler, self).__init__()
if not have_fitsio:
raise FITSError("Need fitsio module installed to use this file handler")
self.logger = logger
self.kind = 'fitsio'
def fromHDU(self, hdu, ahdr):
header = hdu.read_header()
for d in header.records():
bnch = ahdr.__setitem__(d['name'], d['value'])
bnch.comment = d['comment']
def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
data = hdu.read()
if len(data.shape) < 2:
# Expand 1D arrays into 1xN array
data = data.reshape((1, data.shape[0]))
else:
# Drill down to 2D data slice
if not naxispath:
naxispath = ([0] * (len(data.shape)-2))
for idx in naxispath:
data = data[idx]
self.fromHDU(hdu, ahdr)
return (data, naxispath)
def load_file(self, filespec, ahdr, numhdu=None, naxispath=None):
filepath = get_path(filespec)
self.logger.info("Loading file '%s' ..." % (filepath))
fits_f = fitsio.FITS(filepath)
if numhdu == None:
found_valid_hdu = False
for i in range(len(fits_f)):
hdu = fits_f[i]
info = hdu.get_info()
if not ('ndims' in info) or (info['ndims'] == 0):
# compressed FITS file or non-pixel data hdu?
continue
#print "data type is %s" % hdu.data.dtype.kind
# Looks good, let's try it
found_valid_hdu = True
break
if not found_valid_hdu:
raise FITSError("No data HDU found that Ginga can open in '%s'" % (
filepath))
else:
hdu = fits_f[numhdu]
data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
naxispath=naxispath)
fits_f.close()
return (data, naxispath)
def create_fits(self, data, header):
fits_f = pyfits.HDUList()
hdu = pyfits.PrimaryHDU()
hdu.data = data
for kwd in header.keys():
card = header.get_card(kwd)
hdu.header.update(card.key, card.value, comment=card.comment)
fits_f.append(hdu)
return fits_f
def write_fits(self, path, data, header):
fits_f = fitsio.FITS(path, 'rw')
fits_f = self.create_fits(data, header)
fits_f.writeto(path, output_verify='fix')
fits_f.close()
def save_as_file(self, path, data, header, **kwdargs):
        self.write_fits(path, data, header, **kwdargs)
def get_path(fileSpec):
path = fileSpec
if fileSpec.startswith('file://'):
path = fileSpec[7:]
# TODO: handle web references by fetching the file
return path
# default
fitsLoaderClass = PyFitsFileHandler
# try to use them in this order
# astropy is faster
for name in ('astropy', 'fitsio'):
if use(name, raise_err=True):
break
def get_fitsloader(kind=None, logger=None):
return fitsLoaderClass(logger)
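# Illustrative usage sketch (names `logger` and `ahdr` are assumed to be a logger
# and a Ginga-style header mapping; neither is defined in this module):
#
#     loader = get_fitsloader(logger=logger)
#     data, naxispath = loader.load_file('file:///tmp/example.fits', ahdr)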
#END
| 30.494662
| 95
| 0.566227
|
"""
There are two possible choices for a python FITS file reading package
compatible with Ginga: astropy/pyfits and fitsio. Both are based on
the CFITSIO library, although it seems that astropy's version has
changed quite a bit from the original, while fitsio is still tracking
the current version.
To force the use of one, do:
from ginga.util import io_fits
io_fits.use('package')
(replace 'package' with one of {'astropy', 'fitsio'}) before you load
any images. Otherwise Ginga will try to pick one for you.
"""
import numpy
fits_configured = False
fitsLoaderClass = None
have_pyfits = False
have_fitsio = False
class FITSError(Exception):
pass
def use(fitspkg, raise_err=True):
global fits_configured, fitsLoaderClass, \
have_pyfits, pyfits, \
have_fitsio, fitsio
if fitspkg == 'astropy':
try:
from astropy.io import fits as pyfits
have_pyfits = True
fitsLoaderClass = PyFitsFileHandler
return True
except ImportError:
try:
# maybe they have a standalone version of pyfits?
import pyfits
have_pyfits = True
fitsLoaderClass = PyFitsFileHandler
return True
except ImportError as e:
if raise_err:
raise
return False
elif fitspkg == 'fitsio':
try:
import fitsio
have_fitsio = True
fitsLoaderClass = FitsioFileHandler
return True
except ImportError as e:
if raise_err:
raise
return False
return False
class BaseFitsFileHandler(object):
pass
class PyFitsFileHandler(BaseFitsFileHandler):
def __init__(self, logger):
super(PyFitsFileHandler, self).__init__()
if not have_pyfits:
raise FITSError("Need astropy or pyfits module installed to use this file handler")
self.logger = logger
self.kind = 'pyfits'
def fromHDU(self, hdu, ahdr):
header = hdu.header
if hasattr(header, 'cards'):
#newer astropy.io.fits don't have ascardlist
for card in header.cards:
bnch = ahdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
else:
for card in header.ascardlist():
bnch = ahdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
data = hdu.data
if len(data.shape) < 2:
data = data.reshape((1, data.shape[0]))
else:
if not naxispath:
naxispath = ([0] * (len(data.shape)-2))
for idx in naxispath:
data = data[idx]
self.fromHDU(hdu, ahdr)
return (data, naxispath)
def load_file(self, filespec, ahdr, numhdu=None, naxispath=None):
filepath = get_path(filespec)
self.logger.info("Loading file '%s' ..." % (filepath))
fits_f = pyfits.open(filepath, 'readonly')
try:
fits_f.verify('fix')
        except Exception as e:
            raise FITSError("Error loading fits file '%s': %s" % (
                filepath, str(e)))
if numhdu == None:
found_valid_hdu = False
for i in range(len(fits_f)):
hdu = fits_f[i]
                if hdu.data is None:
continue
if not isinstance(hdu.data, numpy.ndarray):
continue
found_valid_hdu = True
break
if not found_valid_hdu:
raise FITSError("No data HDU found that Ginga can open in '%s'" % (
filepath))
else:
hdu = fits_f[numhdu]
data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
naxispath=naxispath)
fits_f.close()
return (data, naxispath)
def create_fits(self, data, header):
fits_f = pyfits.HDUList()
hdu = pyfits.PrimaryHDU()
hdu.data = data
for kwd in header.keys():
card = header.get_card(kwd)
hdu.header.update(card.key, card.value, comment=card.comment)
fits_f.append(hdu)
return fits_f
def write_fits(self, path, data, header, **kwdargs):
fits_f = self.create_fits(data, header)
fits_f.writeto(path, **kwdargs)
fits_f.close()
def save_as_file(self, path, data, header, **kwdargs):
        self.write_fits(path, data, header, **kwdargs)
class FitsioFileHandler(BaseFitsFileHandler):
def __init__(self, logger):
super(FitsioFileHandler, self).__init__()
if not have_fitsio:
raise FITSError("Need fitsio module installed to use this file handler")
self.logger = logger
self.kind = 'fitsio'
def fromHDU(self, hdu, ahdr):
header = hdu.read_header()
for d in header.records():
bnch = ahdr.__setitem__(d['name'], d['value'])
bnch.comment = d['comment']
def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
data = hdu.read()
if len(data.shape) < 2:
# Expand 1D arrays into 1xN array
data = data.reshape((1, data.shape[0]))
else:
# Drill down to 2D data slice
if not naxispath:
naxispath = ([0] * (len(data.shape)-2))
for idx in naxispath:
data = data[idx]
self.fromHDU(hdu, ahdr)
return (data, naxispath)
def load_file(self, filespec, ahdr, numhdu=None, naxispath=None):
filepath = get_path(filespec)
self.logger.info("Loading file '%s' ..." % (filepath))
fits_f = fitsio.FITS(filepath)
if numhdu == None:
found_valid_hdu = False
for i in range(len(fits_f)):
hdu = fits_f[i]
info = hdu.get_info()
if not ('ndims' in info) or (info['ndims'] == 0):
# compressed FITS file or non-pixel data hdu?
continue
#print "data type is %s" % hdu.data.dtype.kind
# Looks good, let's try it
found_valid_hdu = True
break
if not found_valid_hdu:
raise FITSError("No data HDU found that Ginga can open in '%s'" % (
filepath))
else:
hdu = fits_f[numhdu]
data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
naxispath=naxispath)
fits_f.close()
return (data, naxispath)
def create_fits(self, data, header):
fits_f = pyfits.HDUList()
hdu = pyfits.PrimaryHDU()
hdu.data = data
for kwd in header.keys():
card = header.get_card(kwd)
hdu.header.update(card.key, card.value, comment=card.comment)
fits_f.append(hdu)
return fits_f
def write_fits(self, path, data, header):
fits_f = fitsio.FITS(path, 'rw')
fits_f = self.create_fits(data, header)
fits_f.writeto(path, output_verify='fix')
fits_f.close()
def save_as_file(self, path, data, header, **kwdargs):
        self.write_fits(path, data, header, **kwdargs)
def get_path(fileSpec):
path = fileSpec
if fileSpec.startswith('file://'):
path = fileSpec[7:]
return path
fitsLoaderClass = PyFitsFileHandler
for name in ('astropy', 'fitsio'):
if use(name, raise_err=True):
break
def get_fitsloader(kind=None, logger=None):
return fitsLoaderClass(logger)
| false
| true
|
7906bd00579953df340b1dd174133b25cd063576
| 3,165
|
py
|
Python
|
a_full_model.py
|
PiotrKrasnowski/Speech_Encryption
|
305a01b82aabb03bedc9036dd69fe18df90ef57b
|
[
"MIT"
] | null | null | null |
a_full_model.py
|
PiotrKrasnowski/Speech_Encryption
|
305a01b82aabb03bedc9036dd69fe18df90ef57b
|
[
"MIT"
] | null | null | null |
a_full_model.py
|
PiotrKrasnowski/Speech_Encryption
|
305a01b82aabb03bedc9036dd69fe18df90ef57b
|
[
"MIT"
] | 1
|
2021-05-01T09:36:48.000Z
|
2021-05-01T09:36:48.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import time
from copy import copy
import os
from single_pitch import single_pitch
from channel import channel
from pseudo_speech import Pseudospeech_Synthetizer_class
from encryption import Encryption_class
from speech_analyzer import Speech_Analyzer_class
from speech_synthesizer import Speech_Synthesizer_class
################################################################
my_analyzer = Speech_Analyzer_class("speech_model.npz","spherical_code.npz") # model parameters generated by speech_model.py and spherical_code.py
my_encryptor = Encryption_class("spherical_code.npz") # model parameters generated by spherical_code.py
my_ps_sp_synthetizer = Pseudospeech_Synthetizer_class("pseudospeech_model.npz","spherical_code.npz") # model parameters generated by pseudo_speech_model.py and spherical_code.py
my_sp_synthesizer = Speech_Synthesizer_class("speech_model.npz") # model parameters generated by speech_model.py
# pseudo random data used for enciphering/deciphering
keybits = np.random.randint(2, size = (160, 10000))
print("step 1")
speech_samples = np.fromfile("temp/hts1a.raw", dtype='int16')
# print(speech_samples.shape)
##### SPEECH ENCODING ######
print("step 2")
pitch_indices, energy_indices, timbre_indices = my_analyzer.analyze_speech(speech_samples)
###### ENCRYPTION ######
print("step 3")
pitch_indices_enc, energy_indices_enc, timbre_indices_enc = my_encryptor.speech_encryption(pitch_indices, energy_indices, timbre_indices, keybits)
###### PSEUDOSPEECH SYNTHESIS ######
print("step 4")
signal = my_ps_sp_synthetizer.synthesize_pseudospeech(pitch_indices_enc, energy_indices_enc, timbre_indices_enc)
###### CHANNEL DISTORTION ######
print("step 5")
signal_rec = channel(signal, "SILK", 16000, 48000) # data samples, codec type, sampling frequency (Hz), compression rate (b/s)
###### PSEUDOSPEECH ANALYSIS ######
print("step 6")
pitch_indices_rec, energy_indices_rec, timbre_indices_rec = my_ps_sp_synthetizer.analyze_pseudospeech(signal_rec)
# ###### DECRYPTION ######
print("step 7")
pitch_indices_dec, energy_indices_dec, timbre_indices_dec = my_encryptor.speech_decryption(pitch_indices_rec, energy_indices_rec, timbre_indices_rec, keybits)
# ###### SPEECH SYNTHESIS ######
print("step 8")
my_sp_synthesizer.synthesize_speech(pitch_indices_dec, energy_indices_dec, timbre_indices_dec) # save to file / input of the narrowband (8kHz) LPCNet
print("Finished")
################
# plt.figure()
# plt.plot(energy_indices)
# plt.figure()
# plt.plot(pitch_indices)
# plt.figure()
# plt.plot(np.transpose(timbre_indices))
################
# plt.figure()
# plt.plot(energy_indices_enc)
# plt.figure()
# plt.plot(pitch_indices_enc)
# plt.figure()
# plt.plot(np.transpose(timbre_indices_enc))
################
# plt.figure()
# plt.plot(energy_indices_rec)
# plt.figure()
# plt.plot(pitch_indices_rec)
# plt.figure()
# plt.plot(np.transpose(timbre_indices_rec))
################
# plt.figure()
# plt.plot(energy_indices_dec)
# plt.figure()
# plt.plot(pitch_indices_dec)
# plt.figure()
# plt.plot(np.transpose(timbre_indices_dec))
################
plt.show()
| 29.579439
| 177
| 0.740916
|
import numpy as np
import matplotlib.pyplot as plt
import time
from copy import copy
import os
from single_pitch import single_pitch
from channel import channel
from pseudo_speech import Pseudospeech_Synthetizer_class
from encryption import Encryption_class
from speech_analyzer import Speech_Analyzer_class
from speech_synthesizer import Speech_Synthesizer_class
| true
| true
|
7906bd059365b8b3b4c837fe3a8ac573659593ac
| 2,249
|
py
|
Python
|
scent.py
|
jacebrowning/AI-WS
|
31942e85233b5d55f52f668daf9ef91d168e91b6
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
scent.py
|
jacebrowning/AI-WS
|
31942e85233b5d55f52f668daf9ef91d168e91b6
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
scent.py
|
jacebrowning/AI-WS
|
31942e85233b5d55f52f668daf9ef91d168e91b6
|
[
"Apache-2.0",
"BSD-2-Clause"
] | 9
|
2018-01-04T05:32:39.000Z
|
2018-03-24T02:41:28.000Z
|
# -*- coding: utf-8 -*-
"""Configuration file for sniffer."""
# pylint: disable=superfluous-parens,bad-continuation
import time
import subprocess
from sniffer.api import select_runnable, file_validator, runnable
try:
from pync import Notifier
except ImportError:
notify = None
else:
notify = Notifier.notify
watch_paths = ["flask_api"]
class Options(object):
group = int(time.time()) # unique per run
show_coverage = False
rerun_args = None
targets = [
(('make', 'test'), "Run Tests", True),
(('make', 'check'), "Static Analysis", True),
(('make', 'doc'), None, True),
]
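    # Hypothetical example of an extra target entry (not part of the original
    # configuration): a tuple of (command, notification title, retry-on-failure).
    #     (('make', 'lint'), "Lint", False),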
@select_runnable('run_targets')
@file_validator
def python_files(filename):
return filename.endswith('.py')
@select_runnable('run_targets')
@file_validator
def html_files(filename):
return filename.split('.')[-1] in ['html', 'css', 'js']
@runnable
def run_targets(*args):
"""Run targets for Python."""
Options.show_coverage = 'coverage' in args
count = 0
for count, (command, title, retry) in enumerate(Options.targets, start=1):
success = call(command, title, retry)
if not success:
message = "✅ " * (count - 1) + "❌"
show_notification(message, title)
return False
message = "✅ " * count
title = "All Targets"
show_notification(message, title)
show_coverage()
return True
def call(command, title, retry):
"""Run a command-line program and display the result."""
if Options.rerun_args:
command, title, retry = Options.rerun_args
Options.rerun_args = None
success = call(command, title, retry)
if not success:
return False
print("")
print("$ %s" % ' '.join(command))
failure = subprocess.call(command)
if failure and retry:
Options.rerun_args = command, title, retry
return not failure
def show_notification(message, title):
"""Show a user notification."""
if notify and title:
notify(message, title=title, group=Options.group)
def show_coverage():
"""Launch the coverage report."""
if Options.show_coverage:
subprocess.call(['make', 'read-coverage'])
Options.show_coverage = False
| 22.94898
| 78
| 0.636727
|
import time
import subprocess
from sniffer.api import select_runnable, file_validator, runnable
try:
from pync import Notifier
except ImportError:
notify = None
else:
notify = Notifier.notify
watch_paths = ["flask_api"]
class Options(object):
group = int(time.time())
show_coverage = False
rerun_args = None
targets = [
(('make', 'test'), "Run Tests", True),
(('make', 'check'), "Static Analysis", True),
(('make', 'doc'), None, True),
]
@select_runnable('run_targets')
@file_validator
def python_files(filename):
return filename.endswith('.py')
@select_runnable('run_targets')
@file_validator
def html_files(filename):
return filename.split('.')[-1] in ['html', 'css', 'js']
@runnable
def run_targets(*args):
Options.show_coverage = 'coverage' in args
count = 0
for count, (command, title, retry) in enumerate(Options.targets, start=1):
success = call(command, title, retry)
if not success:
message = "✅ " * (count - 1) + "❌"
show_notification(message, title)
return False
message = "✅ " * count
title = "All Targets"
show_notification(message, title)
show_coverage()
return True
def call(command, title, retry):
if Options.rerun_args:
command, title, retry = Options.rerun_args
Options.rerun_args = None
success = call(command, title, retry)
if not success:
return False
print("")
print("$ %s" % ' '.join(command))
failure = subprocess.call(command)
if failure and retry:
Options.rerun_args = command, title, retry
return not failure
def show_notification(message, title):
if notify and title:
notify(message, title=title, group=Options.group)
def show_coverage():
if Options.show_coverage:
subprocess.call(['make', 'read-coverage'])
Options.show_coverage = False
| true
| true
|
7906bd20606532645f93c443038fb7d43a0b0c56
| 10,572
|
py
|
Python
|
modules/nashequilibrium.py
|
benedictvs/FOCS-Calculator
|
25dad4c6624be1950ce21594b4127c05be20b121
|
[
"MIT"
] | 1
|
2021-11-22T21:54:28.000Z
|
2021-11-22T21:54:28.000Z
|
modules/nashequilibrium.py
|
benedictvs/FOCS-Calculator
|
25dad4c6624be1950ce21594b4127c05be20b121
|
[
"MIT"
] | 34
|
2021-10-07T22:55:23.000Z
|
2021-12-06T00:48:55.000Z
|
modules/nashequilibrium.py
|
benedictvs/FOCS-Calculator
|
25dad4c6624be1950ce21594b4127c05be20b121
|
[
"MIT"
] | 1
|
2021-10-18T23:33:44.000Z
|
2021-10-18T23:33:44.000Z
|
from abstractclasses import solver, solver_model
"""
The Nash equilibrium solver takes a payoff matrix from game theory,
then it solves for a Nash equilibrium, if one exists.
"""
# ————————————————————————————————————————————————
# NASH EQUILIBRIUM SOLVER CLASS
# ————————————————————————————————————————————————
class nash_equilibrium_solver(solver):
def format_payoff_matrix(
self,
payoff_matrix: list,
player_1_strategies: list,
player_2_strategies: list,
) -> str:
"""
This is a helper function that turns a payoff matrix and available
strategies into ASCII art of a payoff matrix
"""
ret = "\t Player 1\n"
ret += "\t " + player_1_strategies[0] + " "
for j in range(1, len(payoff_matrix[0])):
ret += player_1_strategies[j] + " "
ret += "\n"
ret += "\t +------------+"
for j in range(1, len(payoff_matrix[0])):
ret += "------------+"
ret += "\n"
ret += "Player 2 " + str(player_2_strategies[0]) + " |"
for j in range(len(payoff_matrix[0])):
ret += (
"{:>5g}, {:<5g}".format(
payoff_matrix[0][j][0], payoff_matrix[0][j][1]
)
+ "|"
)
ret += "\n"
for i in range(1, len(payoff_matrix)):
ret += "\t +------------+"
for j in range(1, len(payoff_matrix[0])):
ret += "------------+"
ret += "\n"
ret += (
"\t "
+ player_2_strategies[i]
+ " |"
+ "{:>5g}, {:<5g}".format(
payoff_matrix[i][0][0], payoff_matrix[i][0][1]
)
+ "|"
)
for j in range(1, len(payoff_matrix[i])):
ret += (
"{:>5g}, {:<5g}".format(
payoff_matrix[i][j][0], payoff_matrix[i][j][1]
)
+ "|"
)
ret += "\n"
ret += "\t +------------+"
for j in range(1, len(payoff_matrix[0])):
ret += "------------+"
ret += "\n"
return ret
def prompt_inputs(self) -> None:
player_1_strategies = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
]
player_2_strategies = [
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
]
num_strategies_1 = self.prompt_integer(
"Please enter the number of strategies for player 1 (2-13) > ",
2,
13,
)
num_strategies_2 = self.prompt_integer(
"Please enter the number of strategies for player 2 (2-13) > ",
2,
13,
)
player_1_strategies = player_1_strategies[:num_strategies_1]
player_2_strategies = player_2_strategies[:num_strategies_2]
payoff_matrix = [
[(0, 0) for i in range(num_strategies_1)]
for j in range(num_strategies_2)
]
print(
self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
)
for i in range(num_strategies_2):
for j in range(num_strategies_1):
player_1_payoff = self.prompt_float(
"Please enter the payoff value for Player "
+ str(1)
+ " in cell "
+ str(player_1_strategies[j])
+ ", "
+ str(player_2_strategies[i])
+ " of the payoff matrix > "
)
player_2_payoff = self.prompt_float(
"Please enter the payoff value for Player "
+ str(2)
+ " in cell "
+ str(player_1_strategies[j])
+ ", "
+ str(player_2_strategies[i])
+ " of the payoff matrix > "
)
payoff_matrix[i][j] = (player_2_payoff, player_1_payoff)
print(
self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
)
# Set inputs
self.inputs["payoff_matrix"] = payoff_matrix
self.inputs["player_1_strategies"] = player_1_strategies
self.inputs["player_2_strategies"] = player_2_strategies
self.inputs["format_payoff_matrix"] = self.format_payoff_matrix
# ————————————————————————————————————————————————
# NASH EQUILIBRIUM MODEL CLASS
# ————————————————————————————————————————————————
class nash_equilibrium_model(solver_model):
def __init__(self, **inputs) -> None:
super().__init__(**inputs)
self.format_payoff_matrix = self.inputs["format_payoff_matrix"]
def solve(self) -> None:
payoff_matrix = self.inputs["payoff_matrix"]
player_1_strategies = self.inputs["player_1_strategies"]
player_2_strategies = self.inputs["player_2_strategies"]
self.ans, self.work = self.nash(
payoff_matrix, player_1_strategies, player_2_strategies
)
def nash(
self,
payoff_matrix: list,
player_1_strategies: list,
player_2_strategies: list,
) -> tuple:
"""
Takes a payoff matrix from game theory and the available strategies for
both players. Solves for the Nash equilibrium
"""
work = ""
no_dominant_exists = False
while not no_dominant_exists and not (
len(player_1_strategies) == 1 and len(player_2_strategies) == 1
):
is_break = False
for i in range(len(payoff_matrix)):
for j in range(len(payoff_matrix)):
if (
i != j
and i < len(payoff_matrix)
and j < len(payoff_matrix)
):
is_greater = False
for k in range(len(payoff_matrix[0])):
if float(payoff_matrix[i][k][0]) >= float(
payoff_matrix[j][k][0]
):
is_greater = True
if is_greater:
break
if not is_greater:
work += (
"Player 2's Strategy "
+ str(player_2_strategies[j])
+ " dominates strategy "
+ str(player_2_strategies[i])
+ "\n"
)
payoff_matrix.pop(i)
player_2_strategies.pop(i)
is_break = True
work += self.format_payoff_matrix(
payoff_matrix,
player_1_strategies,
player_2_strategies,
)
work += "\n"
break
if is_break:
break
if not is_break:
no_dominant_exists = True
else:
no_dominant_exists = False
is_break = False
for i in range(len(payoff_matrix[0])):
for j in range(len(payoff_matrix[0])):
if (
i != j
and i < len(payoff_matrix[0])
and j < len(payoff_matrix[0])
):
is_greater = False
for k in range(len(payoff_matrix)):
if float(payoff_matrix[k][i][1]) >= float(
payoff_matrix[k][j][1]
):
is_greater = True
if is_greater:
break
if not is_greater:
work += (
"Player 1's Strategy "
+ str(player_1_strategies[j])
+ " dominates strategy "
+ str(player_1_strategies[i])
+ "\n"
)
for index in range(len(payoff_matrix)):
payoff_matrix[index].pop(i)
player_1_strategies.pop(i)
work += self.format_payoff_matrix(
payoff_matrix,
player_1_strategies,
player_2_strategies,
)
work += "\n"
is_break = True
break
if not is_break:
no_dominant_exists = True
else:
no_dominant_exists = False
if is_break:
no_dominant_exists = False
if not (
len(player_1_strategies) == 1 and len(player_2_strategies) == 1
):
ans = (
"There is no Nash equilibrium, since at least one player has"
+ " multiple viable strategies.\n"
)
work += ans
work += self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
else:
ans = (
"This is the Nash equilibrium of the entered payoff matrix,"
+ " calculated by eliminating dominanted strategies.\n"
)
ans += self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
work += ans
return ans, work
| 35.006623
| 79
| 0.404748
|
from abstractclasses import solver, solver_model
class nash_equilibrium_solver(solver):
def format_payoff_matrix(
self,
payoff_matrix: list,
player_1_strategies: list,
player_2_strategies: list,
) -> str:
ret = "\t Player 1\n"
ret += "\t " + player_1_strategies[0] + " "
for j in range(1, len(payoff_matrix[0])):
ret += player_1_strategies[j] + " "
ret += "\n"
ret += "\t +------------+"
for j in range(1, len(payoff_matrix[0])):
ret += "------------+"
ret += "\n"
ret += "Player 2 " + str(player_2_strategies[0]) + " |"
for j in range(len(payoff_matrix[0])):
ret += (
"{:>5g}, {:<5g}".format(
payoff_matrix[0][j][0], payoff_matrix[0][j][1]
)
+ "|"
)
ret += "\n"
for i in range(1, len(payoff_matrix)):
ret += "\t +------------+"
for j in range(1, len(payoff_matrix[0])):
ret += "------------+"
ret += "\n"
ret += (
"\t "
+ player_2_strategies[i]
+ " |"
+ "{:>5g}, {:<5g}".format(
payoff_matrix[i][0][0], payoff_matrix[i][0][1]
)
+ "|"
)
for j in range(1, len(payoff_matrix[i])):
ret += (
"{:>5g}, {:<5g}".format(
payoff_matrix[i][j][0], payoff_matrix[i][j][1]
)
+ "|"
)
ret += "\n"
ret += "\t +------------+"
for j in range(1, len(payoff_matrix[0])):
ret += "------------+"
ret += "\n"
return ret
def prompt_inputs(self) -> None:
player_1_strategies = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
]
player_2_strategies = [
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
]
num_strategies_1 = self.prompt_integer(
"Please enter the number of strategies for player 1 (2-13) > ",
2,
13,
)
num_strategies_2 = self.prompt_integer(
"Please enter the number of strategies for player 2 (2-13) > ",
2,
13,
)
player_1_strategies = player_1_strategies[:num_strategies_1]
player_2_strategies = player_2_strategies[:num_strategies_2]
payoff_matrix = [
[(0, 0) for i in range(num_strategies_1)]
for j in range(num_strategies_2)
]
print(
self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
)
for i in range(num_strategies_2):
for j in range(num_strategies_1):
player_1_payoff = self.prompt_float(
"Please enter the payoff value for Player "
+ str(1)
+ " in cell "
+ str(player_1_strategies[j])
+ ", "
+ str(player_2_strategies[i])
+ " of the payoff matrix > "
)
player_2_payoff = self.prompt_float(
"Please enter the payoff value for Player "
+ str(2)
+ " in cell "
+ str(player_1_strategies[j])
+ ", "
+ str(player_2_strategies[i])
+ " of the payoff matrix > "
)
payoff_matrix[i][j] = (player_2_payoff, player_1_payoff)
print(
self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
)
self.inputs["payoff_matrix"] = payoff_matrix
self.inputs["player_1_strategies"] = player_1_strategies
self.inputs["player_2_strategies"] = player_2_strategies
self.inputs["format_payoff_matrix"] = self.format_payoff_matrix
class nash_equilibrium_model(solver_model):
def __init__(self, **inputs) -> None:
super().__init__(**inputs)
self.format_payoff_matrix = self.inputs["format_payoff_matrix"]
def solve(self) -> None:
payoff_matrix = self.inputs["payoff_matrix"]
player_1_strategies = self.inputs["player_1_strategies"]
player_2_strategies = self.inputs["player_2_strategies"]
self.ans, self.work = self.nash(
payoff_matrix, player_1_strategies, player_2_strategies
)
def nash(
self,
payoff_matrix: list,
player_1_strategies: list,
player_2_strategies: list,
) -> tuple:
work = ""
no_dominant_exists = False
while not no_dominant_exists and not (
len(player_1_strategies) == 1 and len(player_2_strategies) == 1
):
is_break = False
for i in range(len(payoff_matrix)):
for j in range(len(payoff_matrix)):
if (
i != j
and i < len(payoff_matrix)
and j < len(payoff_matrix)
):
is_greater = False
for k in range(len(payoff_matrix[0])):
if float(payoff_matrix[i][k][0]) >= float(
payoff_matrix[j][k][0]
):
is_greater = True
if is_greater:
break
if not is_greater:
work += (
"Player 2's Strategy "
+ str(player_2_strategies[j])
+ " dominates strategy "
+ str(player_2_strategies[i])
+ "\n"
)
payoff_matrix.pop(i)
player_2_strategies.pop(i)
is_break = True
work += self.format_payoff_matrix(
payoff_matrix,
player_1_strategies,
player_2_strategies,
)
work += "\n"
break
if is_break:
break
if not is_break:
no_dominant_exists = True
else:
no_dominant_exists = False
is_break = False
for i in range(len(payoff_matrix[0])):
for j in range(len(payoff_matrix[0])):
if (
i != j
and i < len(payoff_matrix[0])
and j < len(payoff_matrix[0])
):
is_greater = False
for k in range(len(payoff_matrix)):
if float(payoff_matrix[k][i][1]) >= float(
payoff_matrix[k][j][1]
):
is_greater = True
if is_greater:
break
if not is_greater:
work += (
"Player 1's Strategy "
+ str(player_1_strategies[j])
+ " dominates strategy "
+ str(player_1_strategies[i])
+ "\n"
)
for index in range(len(payoff_matrix)):
payoff_matrix[index].pop(i)
player_1_strategies.pop(i)
work += self.format_payoff_matrix(
payoff_matrix,
player_1_strategies,
player_2_strategies,
)
work += "\n"
is_break = True
break
if not is_break:
no_dominant_exists = True
else:
no_dominant_exists = False
if is_break:
no_dominant_exists = False
if not (
len(player_1_strategies) == 1 and len(player_2_strategies) == 1
):
ans = (
"There is no Nash equilibrium, since at least one player has"
+ " multiple viable strategies.\n"
)
work += ans
work += self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
else:
ans = (
"This is the Nash equilibrium of the entered payoff matrix,"
+ " calculated by eliminating dominanted strategies.\n"
)
ans += self.format_payoff_matrix(
payoff_matrix, player_1_strategies, player_2_strategies
)
work += ans
return ans, work
| true
| true
|
7906bdc7eeb824c707621cf046e5d99696ba3c84
| 546
|
py
|
Python
|
runtests.py
|
eshares/django-lockdown
|
c7efb3cddf521eea9e299917bb86be501a3415dc
|
[
"BSD-3-Clause"
] | null | null | null |
runtests.py
|
eshares/django-lockdown
|
c7efb3cddf521eea9e299917bb86be501a3415dc
|
[
"BSD-3-Clause"
] | null | null | null |
runtests.py
|
eshares/django-lockdown
|
c7efb3cddf521eea9e299917bb86be501a3415dc
|
[
"BSD-3-Clause"
] | 2
|
2021-03-04T22:25:35.000Z
|
2021-03-05T00:27:42.000Z
|
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(*test_args):
"""Setup and run django-lockdowns test suite."""
os.environ['DJANGO_SETTINGS_MODULE'] = 'lockdown.tests.test_settings'
django.setup()
if not test_args:
test_args = ['lockdown.tests']
test_runner = get_runner(settings)()
failures = test_runner.run_tests(test_args)
sys.exit(bool(failures))
if __name__ == '__main__':
runtests(*sys.argv[1:])
| 20.222222
| 73
| 0.705128
|
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(*test_args):
os.environ['DJANGO_SETTINGS_MODULE'] = 'lockdown.tests.test_settings'
django.setup()
if not test_args:
test_args = ['lockdown.tests']
test_runner = get_runner(settings)()
failures = test_runner.run_tests(test_args)
sys.exit(bool(failures))
if __name__ == '__main__':
runtests(*sys.argv[1:])
| true
| true
|
7906bddcf68956fa040b9b3e181f0109c06602d0
| 3,094
|
py
|
Python
|
bayes_implicit_solvent/rjmc_experiments/tree_rjmc2.py
|
openforcefield/bayes-implicit-solvent
|
067239fcbb8af28eb6310d702804887662692ec2
|
[
"MIT"
] | 4
|
2019-11-12T16:23:26.000Z
|
2021-07-01T05:37:37.000Z
|
bayes_implicit_solvent/rjmc_experiments/tree_rjmc2.py
|
openforcefield/bayes-implicit-solvent
|
067239fcbb8af28eb6310d702804887662692ec2
|
[
"MIT"
] | 4
|
2019-01-18T22:05:03.000Z
|
2019-11-12T18:37:31.000Z
|
bayes_implicit_solvent/rjmc_experiments/tree_rjmc2.py
|
openforcefield/bayes-implicit-solvent
|
067239fcbb8af28eb6310d702804887662692ec2
|
[
"MIT"
] | 2
|
2019-12-02T20:23:56.000Z
|
2021-03-25T23:28:36.000Z
|
import numpy as np
from bayes_implicit_solvent.continuous_parameter_experiments.elemental_types_mh import log_prior, mols, ll, data_path, \
smiles
smiles_list = smiles
from bayes_implicit_solvent.typers import RADIUS_UNIT
from bayes_implicit_solvent.freesolv import smiles_list
from bayes_implicit_solvent.typers import AtomSpecificationProposal
np.random.seed(0)
from bayes_implicit_solvent.gb_models.obc2_parameters import mbondi_model
initial_tree = mbondi_model
initial_tree.remove_node('[#14]') # otherwise everything is -inf, because this type will be empty
initial_tree.proposal_sigmas['radius'] = 1e-2 * RADIUS_UNIT
initial_tree.proposal_sigmas['scale_factor'] = 1e-2
# add one more parameter per element appearing in FreeSolv but not specified in obc2 parameter set to initial tree
for i in [17, 35, 53]:
smirks = '[#{}]'.format(i)
initial_tree.add_child(smirks, '*')
initial_tree.un_delete_able_types.add(smirks)
specifiers = ['X1', 'X2', 'X3', 'X4', 'a', 'A', '-1', '+0', '+1', '+2']
atom_specification_proposal = AtomSpecificationProposal(atomic_specifiers=specifiers)
smirks_elaboration_proposal = atom_specification_proposal
print('initial tree:')
print(initial_tree)
n_configuration_samples = 25
import os
name = 'tree_rjmc_n_config={}_{}_ll'.format(n_configuration_samples, ll)
smiles_subset_fname = os.path.join(data_path,
'smiles_subset_{}.txt'.format(name))
with open(smiles_subset_fname, 'w') as f:
f.writelines(['{}\n'.format(s) for s in smiles_list])
from bayes_implicit_solvent.prior_checking import check_no_empty_types
error_y_trees = []
def log_prob(tree):
log_prior_value = check_no_empty_types(tree)
theta = np.hstack([tree.get_radii(), tree.get_scale_factors()])
log_prior_value += log_prior(theta)
if log_prior_value > -np.inf:
try:
# TODO: Parallelize. Note that multiprocessing.Pool won't work here because it doesn't play nice with SwigPy objects
# TODO: update to allow scale factors to be variable also
log_likelihood_value = 0
for mol in mols:
radii = tree.assign_radii(mol.mol) / RADIUS_UNIT
scale_factors = tree.assign_scale_factors(mol.mol)
log_likelihood_value += mol.log_prob(radii, scale_factors)
except:
global error_y_trees
error_y_trees.append(tree)
print('Warning! Encountered un-anticipated exception!')
return - np.inf
return log_prior_value + log_likelihood_value
else:
return log_prior_value
from bayes_implicit_solvent.samplers import tree_rjmc
from pickle import dump
n_iterations = 10000
result = tree_rjmc(initial_tree, log_prob, smirks_elaboration_proposal, n_iterations=n_iterations,
fraction_cross_model_proposals=0.1)
with open('elaborate_tree_rjmc2_run_n_compounds={}_n_iter={}_gaussian_ll.pkl'.format(len(mols), n_iterations),
'wb') as f:
dump(result, f)
with open('error_y_trees.pkl', 'wb') as f:
dump(error_y_trees, f)
| 34.764045
| 128
| 0.723659
|
import numpy as np
from bayes_implicit_solvent.continuous_parameter_experiments.elemental_types_mh import log_prior, mols, ll, data_path, \
smiles
smiles_list = smiles
from bayes_implicit_solvent.typers import RADIUS_UNIT
from bayes_implicit_solvent.freesolv import smiles_list
from bayes_implicit_solvent.typers import AtomSpecificationProposal
np.random.seed(0)
from bayes_implicit_solvent.gb_models.obc2_parameters import mbondi_model
initial_tree = mbondi_model
initial_tree.remove_node('[#14]')
initial_tree.proposal_sigmas['radius'] = 1e-2 * RADIUS_UNIT
initial_tree.proposal_sigmas['scale_factor'] = 1e-2
for i in [17, 35, 53]:
smirks = '[#{}]'.format(i)
initial_tree.add_child(smirks, '*')
initial_tree.un_delete_able_types.add(smirks)
specifiers = ['X1', 'X2', 'X3', 'X4', 'a', 'A', '-1', '+0', '+1', '+2']
atom_specification_proposal = AtomSpecificationProposal(atomic_specifiers=specifiers)
smirks_elaboration_proposal = atom_specification_proposal
print('initial tree:')
print(initial_tree)
n_configuration_samples = 25
import os
name = 'tree_rjmc_n_config={}_{}_ll'.format(n_configuration_samples, ll)
smiles_subset_fname = os.path.join(data_path,
'smiles_subset_{}.txt'.format(name))
with open(smiles_subset_fname, 'w') as f:
f.writelines(['{}\n'.format(s) for s in smiles_list])
from bayes_implicit_solvent.prior_checking import check_no_empty_types
error_y_trees = []
def log_prob(tree):
log_prior_value = check_no_empty_types(tree)
theta = np.hstack([tree.get_radii(), tree.get_scale_factors()])
log_prior_value += log_prior(theta)
if log_prior_value > -np.inf:
try:
log_likelihood_value = 0
for mol in mols:
radii = tree.assign_radii(mol.mol) / RADIUS_UNIT
scale_factors = tree.assign_scale_factors(mol.mol)
log_likelihood_value += mol.log_prob(radii, scale_factors)
except:
global error_y_trees
error_y_trees.append(tree)
print('Warning! Encountered un-anticipated exception!')
return - np.inf
return log_prior_value + log_likelihood_value
else:
return log_prior_value
from bayes_implicit_solvent.samplers import tree_rjmc
from pickle import dump
n_iterations = 10000
result = tree_rjmc(initial_tree, log_prob, smirks_elaboration_proposal, n_iterations=n_iterations,
fraction_cross_model_proposals=0.1)
with open('elaborate_tree_rjmc2_run_n_compounds={}_n_iter={}_gaussian_ll.pkl'.format(len(mols), n_iterations),
'wb') as f:
dump(result, f)
with open('error_y_trees.pkl', 'wb') as f:
dump(error_y_trees, f)
| true
| true
|
7906be01ad0bc584794a825f60475442d7cbe8b7
| 28,398
|
py
|
Python
|
src/static_analyzer/Gadget.py
|
michaelbrownuc/GadgetSetAnalyzer
|
40eeb0b9f055b19715de0ea4ed1f9acca92059ad
|
[
"MIT"
] | 10
|
2019-08-17T00:44:52.000Z
|
2022-03-29T02:58:40.000Z
|
src/static_analyzer/Gadget.py
|
michaelbrownuc/GadgetSetAnalyzer
|
40eeb0b9f055b19715de0ea4ed1f9acca92059ad
|
[
"MIT"
] | 9
|
2019-08-24T19:04:52.000Z
|
2022-03-29T03:18:59.000Z
|
src/static_analyzer/Gadget.py
|
michaelbrownuc/GadgetSetAnalyzer
|
40eeb0b9f055b19715de0ea4ed1f9acca92059ad
|
[
"MIT"
] | 2
|
2020-11-21T15:25:59.000Z
|
2022-03-02T03:17:25.000Z
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable. False otherwise
                         Unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
        :return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is not
                         even (2-byte aligned) or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
        :return boolean: Returns True if the gadget creates a value in a segment or extension register, in a
                         RIP-relative location, or in a constant memory location; False otherwise.
"""
        # Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zeroize out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
        :return boolean: Returns True if the gadget ends in a software interrupt whose operand is not 0x80 (the
                         Linux system call interrupt), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
    def contains_static_call(self):
        """
        :return boolean: Returns True if an intermediate instruction is a call to a constant (static) target,
                         False otherwise.
        """
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
        :return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
                         arithmetic operation on a register and ends with a branch to a dereference of that register.
                         Used to iterate through instructions in the payload. The only restriction on the arithmetic
                         operation is that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location, that is also not the target of the GPI. Used to pop a
necessary value off stack en masse before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
        :return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
                         pop opcode and contains at least one other pop operation. The last individual (non-popa) pop
                         must target the call target register.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. Only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pop, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
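        # e.g. an intermediate "xchg rsp, rax" or "mov esp, ebp" adds 4.0 here, while "pop rsp" adds only 1.0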
def check_negative_sp_offsets(self):
"""
        :return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
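        # e.g. "add rsp, 0x10 ; pop rbx ; ret 8" sums to +16 +8 +8 = +32 (no score change); pushes and
        # constant subtractions from the stack pointer are what drive the cumulative offset negative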
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
        # Scan intermediate instructions for conditional operations
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
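        # e.g. an intermediate "jne 0x4010a0" adds 3.0, "cmovz eax, ebx" adds 2.0, and "sete al" adds 1.0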
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value carrying or a bystander register
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
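
# A minimal usage sketch, not part of the analyzer's required flow: it assumes gadget strings in the
# "<offset>: <instr> ; <instr> ; ..." format parsed by __init__ (as emitted by ROPgadget-style tools);
# the concrete gadget below is a hypothetical example.
if __name__ == "__main__":
    candidate = Gadget("0x00401234: pop rdi ; ret")
    if not (candidate.is_useless_op() or candidate.contains_unusable_op() or candidate.is_gpi_only()):
        candidate.check_register_ops()
        candidate.check_memory_writes()
        candidate.check_negative_sp_offsets()
        print(candidate.offset, candidate.score)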
| 46.326264
| 120
| 0.610501
|
from static_analyzer.Instruction import Instruction
class Gadget(object):
def __init__(self, raw_gadget):
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
self.score = 0.0
def is_useless_op(self):
first_opcode = self.instructions[0].opcode
if first_opcode.startswith("j"):
return True
if first_opcode.startswith("bnd"):
return True
if first_opcode.startswith("ret"):
return True
if first_opcode.startswith("iret"):
return True
if first_opcode.startswith("call"):
return True
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
for instr in self.instructions:
if instr.opcode.startswith("inv"):
return True
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
if instr.opcode.startswith("ud"):
return True
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
first_instr = self.instructions[0]
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
first_family = Instruction.get_operand_register_family(first_instr.op1)
if first_family is None:
return False
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
if not cur_instr.creates_value():
continue
if Instruction.get_operand_register_family(cur_instr.op1) == 7:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.op1 in Instruction.register_families[family]:
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
def is_JOP_COP_dataloader(self):
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]:
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if not cur_instr.creates_value():
continue
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7:
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return
def check_sp_target_of_operation(self):
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if not cur_instr.creates_value():
continue
if Instruction.get_operand_register_family(cur_instr.op1) == 7:
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0
def check_negative_sp_offsets(self):
sp_offset = 0
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
def check_contains_conditional_op(self):
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
first_instr = self.instructions[0]
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if not cur_instr.creates_value():
continue
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
if not cur_instr.creates_value():
continue
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else:
self.score += 2.0
def check_memory_writes(self):
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if not cur_instr.creates_value():
continue
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
| true
| true
|
7906be44b86703163ad627edc3d37ab9a8099270
| 3,129
|
py
|
Python
|
webui/rest/rest.py
|
hirolovesbeer/hayabusa2
|
8cf17d7a629af743d983e4506d519d853b2edffc
|
[
"MIT"
] | 9
|
2018-11-02T05:07:23.000Z
|
2020-01-21T08:23:56.000Z
|
webui/rest/rest.py
|
hirolovesbeer/hayabusa2
|
8cf17d7a629af743d983e4506d519d853b2edffc
|
[
"MIT"
] | null | null | null |
webui/rest/rest.py
|
hirolovesbeer/hayabusa2
|
8cf17d7a629af743d983e4506d519d853b2edffc
|
[
"MIT"
] | 1
|
2019-02-04T01:42:03.000Z
|
2019-02-04T01:42:03.000Z
|
#
# run this command
# $ FLASK_APP=rest.py flask run
#
# request like this
# curl -X POST -H 'Accept:application/json' -H 'Content-Type:application/json' -d '{"start-time":"2019-05-08 09:15", "end-time":"2019-05-08 09:30", "match":"error", "user":"syslog", "password":"mvEPMNThq94LQuys68gR", "count":"true", "sum":"false", "exact":"false"}' localhost:5000/
#
import os
import sys
import tempfile
sys.path.append(os.path.join(os.path.dirname(__file__), '../../lib'))
import logging
from logging.handlers import SysLogHandler
from hayabusa import HayabusaBase
from hayabusa.errors import HayabusaError, CLIClientError
from hayabusa.rest_client import RESTClient
from flask import Flask, request, jsonify
app = Flask(__name__)
def print_result(stderr, stdout, count, sum):
if stderr:
        return sys.stderr.write(stderr.rstrip() + '\n')
if stdout:
if count and sum:
return sys.stdout.write(stdout + '\n')
else:
with tempfile.TemporaryFile() as f:
f.write(stdout.encode('utf-8'))
f.seek(0)
max_lines = 100
lines = f.readlines(max_lines)
while lines:
for line in lines:
if line == b'\n':
continue
sys.stdout.write(line.decode('utf-8'))
lines = f.readlines(max_lines)
@app.route('/', methods=['POST'])
def post_json():
json = request.get_json()
start_time = json['start-time']
end_time = json['end-time']
match = json['match']
user = json['user']
password = json['password']
count = True if json['count'].lower() == 'true' else False
sum = True if json['sum'].lower() == 'true' else False
exact = True if json['exact'].lower() == 'true' else False
stdout = ''
stderr = ''
exit_status = None
data = None
request_id = None
HB = HayabusaBase()
config = HB.load_config()
print(config)
logger = HB.set_logger('hayabusa-restapi', logging.DEBUG, False)
try:
client = RESTClient(config, logger)
request_id, data = client.search(user, password, match,
start_time, end_time,
count, sum, exact)
try:
stdout = data['stdout']
stderr = data['stderr']
exit_status = data['exit_status']
except KeyError as e:
raise CLIClientError('Not Found %s in Received Data' % e)
if type(exit_status) != int:
err = 'Invalid exit status (not int) Received: %s (type: %s)'
raise CLIClientError(err % (exit_status, type(exit_status)))
except HayabusaError as e:
sys.stderr.write('%s: %s\n' % (e.__class__.__name__, e))
exit(1)
except Exception as e:
sys.stderr.write('Unexpected Error: %s, %s\n\n' %
(e.__class__.__name__, e))
raise
result = {}
result['result'] = data['stdout']
result['error'] = data['stderr']
return jsonify(result)
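
# A client-side sketch of the same query as the curl example above, using the `requests` library.
# The host/port and credentials are assumptions copied from that example; the helper is illustrative
# only and is never called by the service itself.
def _example_client_request():
    import requests
    payload = {
        "start-time": "2019-05-08 09:15", "end-time": "2019-05-08 09:30",
        "match": "error", "user": "syslog", "password": "mvEPMNThq94LQuys68gR",
        "count": "true", "sum": "false", "exact": "false",
    }
    return requests.post("http://localhost:5000/", json=payload).json()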
| 32.257732
| 282
| 0.569191
|
import os
import sys
import tempfile
sys.path.append(os.path.join(os.path.dirname(__file__), '../../lib'))
import logging
from logging.handlers import SysLogHandler
from hayabusa import HayabusaBase
from hayabusa.errors import HayabusaError, CLIClientError
from hayabusa.rest_client import RESTClient
from flask import Flask, request, jsonify
app = Flask(__name__)
def print_result(stderr, stdout, count, sum):
if stderr:
        return sys.stderr.write(stderr.rstrip() + '\n')
if stdout:
if count and sum:
return sys.stdout.write(stdout + '\n')
else:
with tempfile.TemporaryFile() as f:
f.write(stdout.encode('utf-8'))
f.seek(0)
max_lines = 100
lines = f.readlines(max_lines)
while lines:
for line in lines:
if line == b'\n':
continue
sys.stdout.write(line.decode('utf-8'))
lines = f.readlines(max_lines)
@app.route('/', methods=['POST'])
def post_json():
json = request.get_json()
start_time = json['start-time']
end_time = json['end-time']
match = json['match']
user = json['user']
password = json['password']
count = True if json['count'].lower() == 'true' else False
sum = True if json['sum'].lower() == 'true' else False
exact = True if json['exact'].lower() == 'true' else False
stdout = ''
stderr = ''
exit_status = None
data = None
request_id = None
HB = HayabusaBase()
config = HB.load_config()
print(config)
logger = HB.set_logger('hayabusa-restapi', logging.DEBUG, False)
try:
client = RESTClient(config, logger)
request_id, data = client.search(user, password, match,
start_time, end_time,
count, sum, exact)
try:
stdout = data['stdout']
stderr = data['stderr']
exit_status = data['exit_status']
except KeyError as e:
raise CLIClientError('Not Found %s in Received Data' % e)
if type(exit_status) != int:
err = 'Invalid exit status (not int) Received: %s (type: %s)'
raise CLIClientError(err % (exit_status, type(exit_status)))
except HayabusaError as e:
sys.stderr.write('%s: %s\n' % (e.__class__.__name__, e))
exit(1)
except Exception as e:
sys.stderr.write('Unexpected Error: %s, %s\n\n' %
(e.__class__.__name__, e))
raise
result = {}
result['result'] = data['stdout']
result['error'] = data['stderr']
return jsonify(result)
| true
| true
|
7906bf1c9c5eaae4b80ef6c8de2a027019beb855
| 7,892
|
py
|
Python
|
runtime/python/Lib/ctypes/test/test_cfuncs.py
|
hwaipy/InteractionFreeNode
|
88642b68430f57b028fd0f276a5709f89279e30d
|
[
"MIT"
] | 207
|
2018-10-01T08:53:01.000Z
|
2022-03-14T12:15:54.000Z
|
Thonny/Lib/ctypes/test/test_cfuncs.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 30
|
2019-01-04T10:14:56.000Z
|
2020-10-12T14:00:31.000Z
|
Thonny/Lib/ctypes/test/test_cfuncs.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 76
|
2020-03-16T01:47:46.000Z
|
2022-03-21T16:37:07.000Z
|
# A lot of failures in these tests on Mac OS X.
# Byte order related?
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class CFunctions(unittest.TestCase):
_dll = CDLL(_ctypes_test.__file__)
def S(self):
return c_longlong.in_dll(self._dll, "last_tf_arg_s").value
def U(self):
return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value
def test_byte(self):
self._dll.tf_b.restype = c_byte
self._dll.tf_b.argtypes = (c_byte,)
self.assertEqual(self._dll.tf_b(-126), -42)
self.assertEqual(self.S(), -126)
def test_byte_plus(self):
self._dll.tf_bb.restype = c_byte
self._dll.tf_bb.argtypes = (c_byte, c_byte)
self.assertEqual(self._dll.tf_bb(0, -126), -42)
self.assertEqual(self.S(), -126)
def test_ubyte(self):
self._dll.tf_B.restype = c_ubyte
self._dll.tf_B.argtypes = (c_ubyte,)
self.assertEqual(self._dll.tf_B(255), 85)
self.assertEqual(self.U(), 255)
def test_ubyte_plus(self):
self._dll.tf_bB.restype = c_ubyte
self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
self.assertEqual(self._dll.tf_bB(0, 255), 85)
self.assertEqual(self.U(), 255)
def test_short(self):
self._dll.tf_h.restype = c_short
self._dll.tf_h.argtypes = (c_short,)
self.assertEqual(self._dll.tf_h(-32766), -10922)
self.assertEqual(self.S(), -32766)
def test_short_plus(self):
self._dll.tf_bh.restype = c_short
self._dll.tf_bh.argtypes = (c_byte, c_short)
self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
self.assertEqual(self.S(), -32766)
def test_ushort(self):
self._dll.tf_H.restype = c_ushort
self._dll.tf_H.argtypes = (c_ushort,)
self.assertEqual(self._dll.tf_H(65535), 21845)
self.assertEqual(self.U(), 65535)
def test_ushort_plus(self):
self._dll.tf_bH.restype = c_ushort
self._dll.tf_bH.argtypes = (c_byte, c_ushort)
self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
self.assertEqual(self.U(), 65535)
def test_int(self):
self._dll.tf_i.restype = c_int
self._dll.tf_i.argtypes = (c_int,)
self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_int_plus(self):
self._dll.tf_bi.restype = c_int
self._dll.tf_bi.argtypes = (c_byte, c_int)
self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_uint(self):
self._dll.tf_I.restype = c_uint
self._dll.tf_I.argtypes = (c_uint,)
self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_uint_plus(self):
self._dll.tf_bI.restype = c_uint
self._dll.tf_bI.argtypes = (c_byte, c_uint)
self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_long(self):
self._dll.tf_l.restype = c_long
self._dll.tf_l.argtypes = (c_long,)
self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_long_plus(self):
self._dll.tf_bl.restype = c_long
self._dll.tf_bl.argtypes = (c_byte, c_long)
self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_ulong(self):
self._dll.tf_L.restype = c_ulong
self._dll.tf_L.argtypes = (c_ulong,)
self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_ulong_plus(self):
self._dll.tf_bL.restype = c_ulong
self._dll.tf_bL.argtypes = (c_char, c_ulong)
self.assertEqual(self._dll.tf_bL(b' ', 4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_longlong(self):
self._dll.tf_q.restype = c_longlong
self._dll.tf_q.argtypes = (c_longlong, )
self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
self.assertEqual(self.S(), -9223372036854775806)
def test_longlong_plus(self):
self._dll.tf_bq.restype = c_longlong
self._dll.tf_bq.argtypes = (c_byte, c_longlong)
self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
self.assertEqual(self.S(), -9223372036854775806)
def test_ulonglong(self):
self._dll.tf_Q.restype = c_ulonglong
self._dll.tf_Q.argtypes = (c_ulonglong, )
self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
self.assertEqual(self.U(), 18446744073709551615)
def test_ulonglong_plus(self):
self._dll.tf_bQ.restype = c_ulonglong
self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
self.assertEqual(self.U(), 18446744073709551615)
def test_float(self):
self._dll.tf_f.restype = c_float
self._dll.tf_f.argtypes = (c_float,)
self.assertEqual(self._dll.tf_f(-42.), -14.)
self.assertEqual(self.S(), -42)
def test_float_plus(self):
self._dll.tf_bf.restype = c_float
self._dll.tf_bf.argtypes = (c_byte, c_float)
self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
self.assertEqual(self.S(), -42)
def test_double(self):
self._dll.tf_d.restype = c_double
self._dll.tf_d.argtypes = (c_double,)
self.assertEqual(self._dll.tf_d(42.), 14.)
self.assertEqual(self.S(), 42)
def test_double_plus(self):
self._dll.tf_bd.restype = c_double
self._dll.tf_bd.argtypes = (c_byte, c_double)
self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
self.assertEqual(self.S(), 42)
def test_longdouble(self):
self._dll.tf_D.restype = c_longdouble
self._dll.tf_D.argtypes = (c_longdouble,)
self.assertEqual(self._dll.tf_D(42.), 14.)
self.assertEqual(self.S(), 42)
def test_longdouble_plus(self):
self._dll.tf_bD.restype = c_longdouble
self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
self.assertEqual(self.S(), 42)
def test_callwithresult(self):
def process_result(result):
return result * 2
self._dll.tf_i.restype = process_result
self._dll.tf_i.argtypes = (c_int,)
self.assertEqual(self._dll.tf_i(42), 28)
self.assertEqual(self.S(), 42)
self.assertEqual(self._dll.tf_i(-42), -28)
self.assertEqual(self.S(), -42)
def test_void(self):
self._dll.tv_i.restype = None
self._dll.tv_i.argtypes = (c_int,)
self.assertEqual(self._dll.tv_i(42), None)
self.assertEqual(self.S(), 42)
self.assertEqual(self._dll.tv_i(-42), None)
self.assertEqual(self.S(), -42)
# The following repeats the above tests with stdcall functions (where
# they are available)
try:
WinDLL
except NameError:
def stdcall_dll(*_): pass
else:
class stdcall_dll(WinDLL):
def __getattr__(self, name):
if name[:2] == '__' and name[-2:] == '__':
raise AttributeError(name)
func = self._FuncPtr(("s_" + name, self))
setattr(self, name, func)
return func
@need_symbol('WinDLL')
class stdcallCFunctions(CFunctions):
_dll = stdcall_dll(_ctypes_test.__file__)
if __name__ == '__main__':
unittest.main()
| 37.051643
| 89
| 0.628865
|
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class CFunctions(unittest.TestCase):
_dll = CDLL(_ctypes_test.__file__)
def S(self):
return c_longlong.in_dll(self._dll, "last_tf_arg_s").value
def U(self):
return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value
def test_byte(self):
self._dll.tf_b.restype = c_byte
self._dll.tf_b.argtypes = (c_byte,)
self.assertEqual(self._dll.tf_b(-126), -42)
self.assertEqual(self.S(), -126)
def test_byte_plus(self):
self._dll.tf_bb.restype = c_byte
self._dll.tf_bb.argtypes = (c_byte, c_byte)
self.assertEqual(self._dll.tf_bb(0, -126), -42)
self.assertEqual(self.S(), -126)
def test_ubyte(self):
self._dll.tf_B.restype = c_ubyte
self._dll.tf_B.argtypes = (c_ubyte,)
self.assertEqual(self._dll.tf_B(255), 85)
self.assertEqual(self.U(), 255)
def test_ubyte_plus(self):
self._dll.tf_bB.restype = c_ubyte
self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
self.assertEqual(self._dll.tf_bB(0, 255), 85)
self.assertEqual(self.U(), 255)
def test_short(self):
self._dll.tf_h.restype = c_short
self._dll.tf_h.argtypes = (c_short,)
self.assertEqual(self._dll.tf_h(-32766), -10922)
self.assertEqual(self.S(), -32766)
def test_short_plus(self):
self._dll.tf_bh.restype = c_short
self._dll.tf_bh.argtypes = (c_byte, c_short)
self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
self.assertEqual(self.S(), -32766)
def test_ushort(self):
self._dll.tf_H.restype = c_ushort
self._dll.tf_H.argtypes = (c_ushort,)
self.assertEqual(self._dll.tf_H(65535), 21845)
self.assertEqual(self.U(), 65535)
def test_ushort_plus(self):
self._dll.tf_bH.restype = c_ushort
self._dll.tf_bH.argtypes = (c_byte, c_ushort)
self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
self.assertEqual(self.U(), 65535)
def test_int(self):
self._dll.tf_i.restype = c_int
self._dll.tf_i.argtypes = (c_int,)
self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_int_plus(self):
self._dll.tf_bi.restype = c_int
self._dll.tf_bi.argtypes = (c_byte, c_int)
self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_uint(self):
self._dll.tf_I.restype = c_uint
self._dll.tf_I.argtypes = (c_uint,)
self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_uint_plus(self):
self._dll.tf_bI.restype = c_uint
self._dll.tf_bI.argtypes = (c_byte, c_uint)
self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_long(self):
self._dll.tf_l.restype = c_long
self._dll.tf_l.argtypes = (c_long,)
self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_long_plus(self):
self._dll.tf_bl.restype = c_long
self._dll.tf_bl.argtypes = (c_byte, c_long)
self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_ulong(self):
self._dll.tf_L.restype = c_ulong
self._dll.tf_L.argtypes = (c_ulong,)
self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_ulong_plus(self):
self._dll.tf_bL.restype = c_ulong
self._dll.tf_bL.argtypes = (c_char, c_ulong)
self.assertEqual(self._dll.tf_bL(b' ', 4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_longlong(self):
self._dll.tf_q.restype = c_longlong
self._dll.tf_q.argtypes = (c_longlong, )
self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
self.assertEqual(self.S(), -9223372036854775806)
def test_longlong_plus(self):
self._dll.tf_bq.restype = c_longlong
self._dll.tf_bq.argtypes = (c_byte, c_longlong)
self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
self.assertEqual(self.S(), -9223372036854775806)
def test_ulonglong(self):
self._dll.tf_Q.restype = c_ulonglong
self._dll.tf_Q.argtypes = (c_ulonglong, )
self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
self.assertEqual(self.U(), 18446744073709551615)
def test_ulonglong_plus(self):
self._dll.tf_bQ.restype = c_ulonglong
self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
self.assertEqual(self.U(), 18446744073709551615)
def test_float(self):
self._dll.tf_f.restype = c_float
self._dll.tf_f.argtypes = (c_float,)
self.assertEqual(self._dll.tf_f(-42.), -14.)
self.assertEqual(self.S(), -42)
def test_float_plus(self):
self._dll.tf_bf.restype = c_float
self._dll.tf_bf.argtypes = (c_byte, c_float)
self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
self.assertEqual(self.S(), -42)
def test_double(self):
self._dll.tf_d.restype = c_double
self._dll.tf_d.argtypes = (c_double,)
self.assertEqual(self._dll.tf_d(42.), 14.)
self.assertEqual(self.S(), 42)
def test_double_plus(self):
self._dll.tf_bd.restype = c_double
self._dll.tf_bd.argtypes = (c_byte, c_double)
self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
self.assertEqual(self.S(), 42)
def test_longdouble(self):
self._dll.tf_D.restype = c_longdouble
self._dll.tf_D.argtypes = (c_longdouble,)
self.assertEqual(self._dll.tf_D(42.), 14.)
self.assertEqual(self.S(), 42)
def test_longdouble_plus(self):
self._dll.tf_bD.restype = c_longdouble
self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
self.assertEqual(self.S(), 42)
def test_callwithresult(self):
def process_result(result):
return result * 2
self._dll.tf_i.restype = process_result
self._dll.tf_i.argtypes = (c_int,)
self.assertEqual(self._dll.tf_i(42), 28)
self.assertEqual(self.S(), 42)
self.assertEqual(self._dll.tf_i(-42), -28)
self.assertEqual(self.S(), -42)
def test_void(self):
self._dll.tv_i.restype = None
self._dll.tv_i.argtypes = (c_int,)
self.assertEqual(self._dll.tv_i(42), None)
self.assertEqual(self.S(), 42)
self.assertEqual(self._dll.tv_i(-42), None)
self.assertEqual(self.S(), -42)
try:
WinDLL
except NameError:
def stdcall_dll(*_): pass
else:
class stdcall_dll(WinDLL):
def __getattr__(self, name):
if name[:2] == '__' and name[-2:] == '__':
raise AttributeError(name)
func = self._FuncPtr(("s_" + name, self))
setattr(self, name, func)
return func
@need_symbol('WinDLL')
class stdcallCFunctions(CFunctions):
_dll = stdcall_dll(_ctypes_test.__file__)
if __name__ == '__main__':
unittest.main()
| true
| true
|
7906bf4e9571695a2376caedc71b4af44619e399
| 4,994
|
py
|
Python
|
sciencebeam_parser/document/tei/section.py
|
elifesciences/sciencebeam-parser
|
66964f283612b8d6fa8a23ad8790292c1ec07651
|
[
"MIT"
] | 13
|
2021-08-04T12:11:17.000Z
|
2022-03-28T20:41:20.000Z
|
sciencebeam_parser/document/tei/section.py
|
elifesciences/sciencebeam-parser
|
66964f283612b8d6fa8a23ad8790292c1ec07651
|
[
"MIT"
] | 33
|
2021-08-05T08:37:59.000Z
|
2022-03-29T18:42:09.000Z
|
sciencebeam_parser/document/tei/section.py
|
elifesciences/sciencebeam-parser
|
66964f283612b8d6fa8a23ad8790292c1ec07651
|
[
"MIT"
] | 1
|
2022-01-05T14:53:06.000Z
|
2022-01-05T14:53:06.000Z
|
import logging
from typing import (
Iterable,
List,
)
from lxml import etree
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticFigure,
SemanticHeading,
SemanticLabel,
SemanticParagraph,
SemanticRawEquation,
SemanticSection,
SemanticSectionTypes,
SemanticTable
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
TeiElementBuilder
)
from sciencebeam_parser.document.tei.factory import (
SingleElementTeiElementFactory,
T_ElementChildrenList,
TeiElementFactory,
TeiElementFactoryContext
)
LOGGER = logging.getLogger(__name__)
class HeadingTeiElementFactory(SingleElementTeiElementFactory):
def get_tei_element_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticHeading)
semantic_heading = semantic_content
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(semantic_heading)
]
pending_whitespace = ''
for child_semantic_content in semantic_heading:
if isinstance(child_semantic_content, SemanticLabel):
children.append({'n': child_semantic_content.get_text()})
continue
layout_block = child_semantic_content.merged_block
if pending_whitespace:
children.append(pending_whitespace)
children.extend(context.iter_layout_block_tei_children(
layout_block=layout_block,
enable_coordinates=False
))
pending_whitespace = layout_block.whitespace
return TEI_E('head', *children)
def iter_flat_paragraph_formula(
semantic_paragraph: SemanticParagraph
) -> Iterable[SemanticContentWrapper]:
pending_semantic_content_list: List[SemanticContentWrapper] = []
for semantic_content in semantic_paragraph:
if isinstance(semantic_content, SemanticRawEquation):
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
pending_semantic_content_list = []
yield semantic_content
continue
pending_semantic_content_list.append(semantic_content)
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
class ParagraphTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticParagraph)
semantic_paragraph = semantic_content
result: List[etree.ElementBase] = []
for flat_parent_semantic_content in iter_flat_paragraph_formula(semantic_paragraph):
if not isinstance(flat_parent_semantic_content, SemanticParagraph):
result.extend(context.get_tei_child_elements_for_semantic_content(
flat_parent_semantic_content
))
continue
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(flat_parent_semantic_content)
]
pending_whitespace = ''
for child_semantic_content in flat_parent_semantic_content:
pending_whitespace = context.append_tei_children_list_and_get_whitespace(
children,
child_semantic_content,
pending_whitespace=pending_whitespace
)
result.append(TEI_E('p', *children))
return result
class SectionTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticSection)
semantic_section = semantic_content
tei_section = TeiElementBuilder(TEI_E('div'))
for child_semantic_content in semantic_section:
if isinstance(child_semantic_content, (SemanticFigure, SemanticTable,)):
# rendered at parent level
continue
tei_section.extend(context.get_tei_child_elements_for_semantic_content(
child_semantic_content
))
if semantic_content.section_type == SemanticSectionTypes.ACKNOWLEDGEMENT:
tei_section.element.attrib['type'] = 'acknowledgement'
if not list(tei_section.element):
return []
return [tei_section.element]
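
# A small illustrative helper, not part of the original factories: it sketches how
# iter_flat_paragraph_formula splits a paragraph, grouping ordinary content into SemanticParagraph
# chunks while yielding each SemanticRawEquation on its own. `paragraph` is assumed to be an
# already constructed SemanticParagraph.
def _describe_flat_paragraph_split(paragraph: SemanticParagraph) -> List[str]:
    return [type(content).__name__ for content in iter_flat_paragraph_formula(paragraph)]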
| 38.122137
| 97
| 0.696235
|
import logging
from typing import (
Iterable,
List,
)
from lxml import etree
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticFigure,
SemanticHeading,
SemanticLabel,
SemanticParagraph,
SemanticRawEquation,
SemanticSection,
SemanticSectionTypes,
SemanticTable
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
TeiElementBuilder
)
from sciencebeam_parser.document.tei.factory import (
SingleElementTeiElementFactory,
T_ElementChildrenList,
TeiElementFactory,
TeiElementFactoryContext
)
LOGGER = logging.getLogger(__name__)
class HeadingTeiElementFactory(SingleElementTeiElementFactory):
def get_tei_element_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticHeading)
semantic_heading = semantic_content
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(semantic_heading)
]
pending_whitespace = ''
for child_semantic_content in semantic_heading:
if isinstance(child_semantic_content, SemanticLabel):
children.append({'n': child_semantic_content.get_text()})
continue
layout_block = child_semantic_content.merged_block
if pending_whitespace:
children.append(pending_whitespace)
children.extend(context.iter_layout_block_tei_children(
layout_block=layout_block,
enable_coordinates=False
))
pending_whitespace = layout_block.whitespace
return TEI_E('head', *children)
def iter_flat_paragraph_formula(
semantic_paragraph: SemanticParagraph
) -> Iterable[SemanticContentWrapper]:
pending_semantic_content_list: List[SemanticContentWrapper] = []
for semantic_content in semantic_paragraph:
if isinstance(semantic_content, SemanticRawEquation):
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
pending_semantic_content_list = []
yield semantic_content
continue
pending_semantic_content_list.append(semantic_content)
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
class ParagraphTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticParagraph)
semantic_paragraph = semantic_content
result: List[etree.ElementBase] = []
for flat_parent_semantic_content in iter_flat_paragraph_formula(semantic_paragraph):
if not isinstance(flat_parent_semantic_content, SemanticParagraph):
result.extend(context.get_tei_child_elements_for_semantic_content(
flat_parent_semantic_content
))
continue
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(flat_parent_semantic_content)
]
pending_whitespace = ''
for child_semantic_content in flat_parent_semantic_content:
pending_whitespace = context.append_tei_children_list_and_get_whitespace(
children,
child_semantic_content,
pending_whitespace=pending_whitespace
)
result.append(TEI_E('p', *children))
return result
class SectionTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticSection)
semantic_section = semantic_content
tei_section = TeiElementBuilder(TEI_E('div'))
for child_semantic_content in semantic_section:
if isinstance(child_semantic_content, (SemanticFigure, SemanticTable,)):
continue
tei_section.extend(context.get_tei_child_elements_for_semantic_content(
child_semantic_content
))
if semantic_content.section_type == SemanticSectionTypes.ACKNOWLEDGEMENT:
tei_section.element.attrib['type'] = 'acknowledgement'
if not list(tei_section.element):
return []
return [tei_section.element]
| true
| true
|
7906c00e513938c81191185281255d8fcb089574
| 721
|
py
|
Python
|
fake.py
|
ShuaiGao/mini-shop-server
|
8a72b2d457bba8778e97637027ffa82bfa11e8a9
|
[
"MIT"
] | 1
|
2020-06-13T06:57:53.000Z
|
2020-06-13T06:57:53.000Z
|
fake.py
|
zyxyuanxiao/mini-shop-server
|
90eb5a36b75e680c6f5fe324261fe0c53373cf5a
|
[
"MIT"
] | 1
|
2019-07-08T12:32:29.000Z
|
2019-07-08T12:32:29.000Z
|
fake.py
|
ShuaiGao/mini-shop-server
|
8a72b2d457bba8778e97637027ffa82bfa11e8a9
|
[
"MIT"
] | null | null | null |
# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2018/5/12.
"""
from app import create_app
__author__ = 'Allen7D'
from app.models.base import db
from app.models.user import User
app = create_app()
with app.app_context():
with db.auto_commit():
        # Create a super administrator
user = User()
user.openid = '999'
user.email = '999@qq.com'
user.nickname = 'Super'
user.auth = 2
user.password = '123456'
db.session.add(user)
with db.auto_commit():
        # Create a regular administrator
user = User()
user.openid = '777'
user.email = '777@qq.com'
user.nickname = 'Admin'
user.auth = 1
user.password = '123456'
db.session.add(user)
| 22.53125
| 34
| 0.568655
|
from app import create_app
__author__ = 'Allen7D'
from app.models.base import db
from app.models.user import User
app = create_app()
with app.app_context():
with db.auto_commit():
user = User()
user.openid = '999'
user.email = '999@qq.com'
user.nickname = 'Super'
user.auth = 2
user.password = '123456'
db.session.add(user)
with db.auto_commit():
user = User()
user.openid = '777'
user.email = '777@qq.com'
user.nickname = 'Admin'
user.auth = 1
user.password = '123456'
db.session.add(user)
| true
| true
|
7906c054b3ed392cc4df1a4c5ae03c4d67c39ae5
| 37,606
|
py
|
Python
|
mushroom_rl/core/parallelization_tools/step_sequence.py
|
nifunk/GNNMushroomRL
|
d0d8eefdc10bca62e7cb536d65ea619607be755b
|
[
"MIT"
] | 1
|
2022-02-06T22:04:42.000Z
|
2022-02-06T22:04:42.000Z
|
mushroom_rl/core/parallelization_tools/step_sequence.py
|
nifunk/GNNMushroomRL
|
d0d8eefdc10bca62e7cb536d65ea619607be755b
|
[
"MIT"
] | null | null | null |
mushroom_rl/core/parallelization_tools/step_sequence.py
|
nifunk/GNNMushroomRL
|
d0d8eefdc10bca62e7cb536d65ea619607be755b
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
# Index conversion
idx = operator.index(idx)
# Check negative index
if idx < 0:
idx += n
# Check bounds
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
""" Views a slice through a dict of lists or tensors. """
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
# Obtain keyed value from obj dict
value = self._obj.get(key, None)
if value is None:
# Try pluralized keys
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
# Obtain indexed element from value
if isinstance(value, dict):
# Return subdict proxy
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Return tuple of slices
# Since we can't proxy a tuple, we slice eagerly
            # Use type(value) to support named tuples (the key is still the index, though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
# List dict items not starting with _
return [k for k in self._obj if not k.startswith("_")]
# Define getitem and setitem too, helps when return attr is a keyword
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
# Serialize only dict and index
def __getstate__(self):
return {"obj", self._obj, "index", self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
"""
A single step in a rollout.
    This object is a proxy referring to a specific index in the rollout. When querying an attribute from the step,
it will try to return the corresponding slice from the rollout. Additionally, one can prefix attributes with `next_`
    to access the value for the next step, i.e. `next_observations` is the observation made at the start of the next step.
"""
__slots__ = "_rollout"
def __init__(self, rollout, index):
"""
Constructor
:param rollout: `StepSequence` object to which this step belongs
:param index: index of this step in the rollout
"""
# Call DictIndexProxy's constructor
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout", self._rollout, "index", self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
"""
A sequence of steps.
During the rollout, the values of different variables are recorded. This class provides efficient storage and
access for these values. The constructor accepts a list of step entries for each variable. For every step,
the list should contain a Tensor/ndarray of values for that step. The shape of these tensors must be the same for
all step entries. The passed tensors are then stacked, so that the first dimension is the step count.
    Some values, like the observations, can have one more element than there are steps to encode the state after the
last step. Additionally, the step entries may be dicts to support keyed storage. A list of dicts is converted to
a dict of lists, each of which will be regularly stacked. Apart from the variable-based view, the rollout can also
    be seen as a sequence of steps. Each Step object is a proxy; its attributes refer to the respective slice of the
    corresponding variable. The only required result variables are `rewards`, `observations`, and `actions`.
All other variables are optional. Common optional ones are `states` and `rollout_info`.
.. note::
        Storing PyTorch tensors with gradient tracking is NOT supported. The rationale behind this is eager error
avoidance. The only reason you would add them is to profit from the optimized slicing, but using that with
gradient tracking risks lingering incomplete graphs.
"""
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = {}
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
# print (data)
"""
Constructor
:param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch
:param rollout_info: data staying constant through the whole episode
:param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.
Will use Tensors if any data argument does, else ndarrays
:param done: boolean ndarray, specifying for each step whether it led to termination.
The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.
:param continuous: true if the steps form one continuous sequence.
:param rewards: sequence of reward values, determines sequence length
:param observations: sequence of observation values, the length must be `len(rewards) + 1`
:param actions: sequence of action values, the length must be `len(rewards)`
:param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
"""
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
# Use numpy by default
data_format = "numpy"
self._data_format = data_format
# Check for missing extra fields
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
# Set mandatory data fields
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
# Set other data fields and verify their length
for name, value in data.items():
self.add_data(name, value)
# Set done list if any. The done list is always a numpy array since torch doesn't support boolean tensors.
if done is None:
done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
""" Get the name of data format ('torch' or 'numpy'). """
return self._data_format
@property
def data_names(self) -> Sequence[str]:
""" Get the list of data attribute names. """
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
""" Count the number of sub-rollouts inside this step sequence. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
""" Lengths of sub-rollouts. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
""" Get the step sequence's length. """
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
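# Usage sketch (assuming a StepSequence `ro` built as described above):
#   step = ro[3]     # Step proxy; access step.observation, step.action, step.reward
#   sub = ro[2:5]    # new StepSequence with 3 steps and re-based rollout bounds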
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
# Check type of data
if isinstance(value, dict):
# Validate dict entries
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
# Validate dict entries
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
# A single array. The first dimension must match
vlen = value.shape[0]
else:
# Should be a sequence
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements, "
f"but has {vlen} elements."
)
else:
# Disallow +1 tensors
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements, " f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None # unsupported
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
# No truncation
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
"""
Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation)
"""
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
# Compute desired step length
ro_length = self.length
if with_after_last:
ro_length += 1
# Create zero-filled
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
value = np.zeros((ro_length,) + item_shape)
else:
# Check the data
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
# Stack into one array/tensor
value = stack_to_format(value, self._data_format)
else:
# Ensure right array format
value = to_format(value, self._data_format)
# Store in dict
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
"""
Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present
"""
assert name in self._data_names
entry = self.__dict__[name]
# Truncate if needed
if truncate_last:
# Check length
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
"""
Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("numpy", data_type)
def torch(self, data_type=None):
"""
Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged.
"""
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
"""
Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.
"""
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
"""
Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset.
"""
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
# Analyze slice
start, end, step = index.indices(self.rollout_count)
if step == 1:
# A simple, continuous slice
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
# Convert nonstandard slice to range
index = range(start, end, step)
if isinstance(index, Iterable):
# Nontrivial non-continuous slice, need to slice each element and concat them.
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
# Decode index
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
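# Usage sketch (assuming `ro` holds at least two sub-rollouts): `ro.get_rollout(0)`
# returns the first sub-rollout as its own StepSequence, while
# `ro.get_rollout(slice(-2, None))` returns the last two merged into one continuous sequence.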
def iterate_rollouts(self):
""" Iterate over all sub-rollouts of a concatenated rollout. """
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
# Optimize for single rollout
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
"""
Sample a random batch of steps from the rollout, together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps
"""
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 2), batch_size) # - 2 to always have a next step
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
"""
Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts like for `split_shuffled_batches`.
"""
if batch_size is None and num_batches is None or batch_size is not None and num_batches is not None:
raise pyrado.ValueErr(msg="Either batch_size or num_batches must not be None, but not both or none!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
# Switch the splitting mode
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
else:
# Split by steps
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
"""
Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be strictly maintained in this case.
.. note::
This method is also supposed to be called for recurrent networks, which have a different `evaluate()`
method that recognizes where the rollouts end within a batch.
"""
if batch_size >= self.length:
# Yield all at once if there are fewer steps than the batch size
yield self
elif complete_rollouts and self.continuous:
# Our goal here is to randomly shuffle the rollouts, while returning batches of batch_size steps.
# The solution here is to take rollouts in a random order and yield a batch each time it exceeds batch_size.
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
# Now, walk through the rollouts in a random order and split once batch size is full.
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
# Got a full batch
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
# Yield the remaining partial batch, if any
if batch:
yield self.get_rollout(batch)
else:
# Split by steps
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
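# Usage sketch: iterate over shuffled mini-batches of roughly 32 steps each; with
# complete_rollouts=True the batch sizes may exceed 32 to keep sub-rollouts intact:
#   for batch in ro.split_shuffled_batches(32, complete_rollouts=True):
#       ...  # batch is again a StepSequence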
def undiscounted_return(self) -> float:
"""
Compute the undiscounted return.
:return: sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
"""
Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards
"""
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
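# Worked example: for rewards [1, 0, 2], discounted_return(0.9) yields
# 1*0.9**0 + 0*0.9**1 + 2*0.9**2 = 2.62, while undiscounted_return() yields 3.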
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
"""
Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:return: concatenated sequence of `Steps`
"""
# Obtain data attribute names
data_names = parts[0].data_names
# Deduce data format if is None
if data_format is None:
data_format = parts[0].data_format
# Concat data fields
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
# Treat done separately since it should stay a ndarray
done = np.concatenate([ro.done for ro in parts])
# Check if parts are continuous
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
# Concatenate rollout separator indices for continuous rollouts
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
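# Usage sketch: rollouts collected by several workers can be merged via
#   big_ro = StepSequence.concat([ro_1, ro_2, ro_3])
# which truncates the trailing "after last" entries and re-bases the rollout bounds.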
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
"""
Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.signal.filtfilt()`
:param fcn_arg_name: name (string) of the remaining input of `fcn()`, e.g. `x` for `scipy.signal.filtfilt()`
:param fcn_arg_types: type or tuple thereof which are expected as input to `fcn()`
:param include_fields: list of field names to include for processing, pass `None` to include everything.
If specified, only fields from this selection will be considered
:param exclude_fields: list of field names to exclude from processing, pass `None` to not exclude anything
:param process_fcn_kwargs: keyword arguments forwarded to `process_fcn()`
:return: new `StepSequence` instance with processed data
"""
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
""" Wrap the processing function to call it recursively for nested data structures. """
# Add the actual data input to the keyword arguments to make calling the function easier
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
# Process the data
inp = fcn(**kwargs)
elif isinstance(inp, dict):
# Recursive call
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
# Recursive call
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
# Go through all desired data fields and apply the processing function
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
# Extract data field
data = rollout.get_data_values(name)
# Process current data field if included and not explicitly excluded
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
# Collect the new/old data
data_dict[name] = data
# Create new object
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
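# Usage sketch (the filter coefficients are hypothetical):
#   from scipy import signal
#   b, a = signal.butter(4, 0.1)
#   filtered_ro = StepSequence.process_data(ro, signal.filtfilt, fcn_arg_name="x", b=b, a=a, axis=0)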
def discounted_reverse_cumsum(data, gamma: float):
"""
Use a linear filter to compute the reverse discounted cumulative sum.
.. note::
`scipy.signal.lfilter` assumes an initialization with 0 by default.
:param data: input data with samples along the 0 axis (e.g. time series)
:param gamma: discount factor
:return: cumulative sums for every step
"""
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
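# Worked example: discounted_reverse_cumsum([1., 1., 1.], gamma=0.5) yields
# [1.75, 1.5, 1.], since each entry equals reward_t + gamma * (cumulative sum at t+1).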
def discounted_value(rollout: StepSequence, gamma: float):
"""
Compute the discounted state values for one rollout.
:param rollout: input data
:param gamma: temporal discount factor
:return: state values for every time step in the rollout
"""
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
"""
Compute the discounted state values for multiple rollouts.
:param rollouts: input data
:param gamma: temporal discount factor
:param data_format: data format of the given rollouts ('torch' or 'numpy')
:return: state values for every time step in the rollouts (concatenated sequence across rollouts)
"""
if data_format == "torch":
# The ndarray.copy() is necessary due to (currently) unsupported negative strides
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
"""
Compute returns using generalized advantage estimation.
.. seealso::
[1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using
Generalized Advantage Estimation', ICLR 2016
:param rollout: sequence of steps
:param gamma: temporal discount factor
:param lamb: averaging factor (lambda) of the generalized advantage estimation, trading bias against variance
:return: estimated advantage
"""
def _next_value(step: Step) -> float:
""" Helper to return `next_value = 0` for last step """
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
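# End-to-end sketch with hypothetical values (assuming the classes and helpers above):
#   import numpy as np
#   ro = StepSequence(
#       rewards=[1.0, 0.0, 2.0],
#       observations=np.zeros((4, 2)),  # one more observation than steps
#       actions=np.zeros((3, 1)),
#   )
#   ro.undiscounted_return()               # -> 3.0
#   values = discounted_value(ro, 0.99)    # per-step discounted returns
#   for step in ro:                        # iterate over Step proxies
#       print(step.observation, step.action, step.reward, step.done)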
| 40.965142
| 120
| 0.625725
|
import functools
import numpy as np
import operator
import random
import scipy.signal as signal
import torch as to
from collections.abc import Iterable
from copy import deepcopy
from math import ceil
from typing import Sequence, Type, Optional, Union, Callable, Tuple
import pyrado
from pyrado.sampling.data_format import stack_to_format, to_format, cat_to_format, new_tuple
from pyrado.sampling.utils import gen_shuffled_batch_idcs, gen_ordered_batch_idcs
def _index_to_int(idx, n):
idx = operator.index(idx)
if idx < 0:
idx += n
if idx < 0 or idx >= n:
raise IndexError
return idx
class DictIndexProxy:
__slots__ = ("__dict__", "_obj", "_index", "_prefix")
def __init__(self, obj: dict, index: int, path: Optional[str] = None):
super().__init__()
self._obj = obj
self._index = index
if path:
self._prefix = path + "."
else:
self._prefix = ""
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
return key, index
def _get_keyed_value(self, key, error_type: Type[Exception] = RuntimeError):
value = self._obj.get(key, None)
if value is None:
value = self._obj.get(key + "s", None)
if value is None:
raise error_type(f"No entry named {self._prefix}{key}")
return value
def _index_value(self, key, value, index, error_type: Type[Exception] = RuntimeError):
if isinstance(value, dict):
return DictIndexProxy(value, index, self._prefix + key)
elif isinstance(value, tuple):
# Use type(value) to support named tuples (the key is still the index though)
return new_tuple(
type(value), (self._index_value(f"{key}[{i}]", v, index, error_type) for i, v in enumerate(value))
)
elif isinstance(value, (to.Tensor, np.ndarray)):
# Return slice of ndarray / tensor
return value[index, ...]
elif isinstance(value, list):
# Return list item
return value[index]
else:
# Unsupported type
raise error_type(f"Entry {self._prefix}{key} has un-gettable type {type(value)}")
def _get_indexed_value(self, key, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
return self._index_value(key, value, index, error_type)
def _set_indexed_value(self, key, new_value, error_type: Type[Exception] = RuntimeError):
real_key, index = self._process_key(key, self._index, error_type)
# Obtain keyed value list from obj dict
value = self._get_keyed_value(real_key, error_type=error_type)
# Set value to data
if isinstance(value, (to.Tensor, np.ndarray)):
# Set slice of ndarray/tensor
value[index, ...] = new_value
elif isinstance(value, list):
# Set list item
value[index] = new_value
else:
# Don't support setting dict proxies
raise error_type(f"Entry {key} has un-settable type {type(value)}")
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError
result = self._get_indexed_value(key, error_type=AttributeError)
self.__dict__[key] = result
return result
def __setattr__(self, key, value):
if not key.startswith("_"):
try:
self._set_indexed_value(key, value, error_type=AttributeError)
except AttributeError:
pass
else:
self.__dict__[key] = value
return
object.__setattr__(self, key, value)
def __dir__(self):
return [k for k in self._obj if not k.startswith("_")]
def __getitem__(self, key):
result = self._get_indexed_value(key, error_type=KeyError)
self.__dict__[key] = result
return result
def __setitem__(self, key, value):
self._set_indexed_value(key, value, error_type=KeyError)
self.__dict__[key] = value
def __getstate__(self):
return {"obj": self._obj, "index": self._index}
def __setstate__(self, state):
self._obj = state["obj"]
self._index = state["index"]
class Step(DictIndexProxy):
__slots__ = "_rollout"
def __init__(self, rollout, index):
super(Step, self).__init__(rollout.__dict__, index)
self._rollout = rollout
def _process_key(self, key: str, index: int, error_type: Type[Exception]):
if key.startswith("next_"):
if not self._rollout.continuous:
raise error_type("Access to next element is not supported for non-continuous rollouts!")
key = key[5:]
index += 1
if key not in self._rollout.data_names and key + "s" not in self._rollout.data_names and key != "done":
raise error_type(f"No such rollout data field: {key}")
return key, index
# Serialize rollout and index
def __getstate__(self):
return {"rollout": self._rollout, "index": self._index}
def __setstate__(self, state):
self._rollout = state["rollout"]
self._obj = self._rollout.__dict__
self._index = state["index"]
class StepSequence(Sequence[Step]):
rewards: Union[np.ndarray, to.Tensor]
observations: Union[np.ndarray, to.Tensor]
actions: Union[np.ndarray, to.Tensor]
# Set of required rollout fields in addition to rewards, observations, actions. Putting this into a class field
# instead of using the constructor arguments reduces duplicate code and allows to override it during unit tests.
required_fields = {}
def __init__(
self,
*,
complete: Optional[bool] = True,
rollout_info=None,
data_format: Optional[str] = None,
done: Optional[np.ndarray] = None,
continuous: Optional[bool] = True,
rollout_bounds=None,
rewards: Sequence,
observations: Sequence,
actions: Sequence,
**data,
):
# print (data)
# Obtain rollout length from reward list
self.length = len(rewards)
if self.length == 0:
raise pyrado.ShapeErr(msg="StepSequence cannot be empty!")
# Set singular attributes
self.rollout_info = rollout_info
self.continuous = continuous
# Infer if this instance is using numpy arrays or PyTorch tensors
if data_format is None:
# We ignore rewards here since it's probably scalar
for value in data.values():
if isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor)):
data_format = "torch"
break
else:
data_format = "numpy"
self._data_format = data_format
missing_fields = StepSequence.required_fields - data.keys()
if missing_fields:
raise ValueError(f"Missing required data fields: {missing_fields}")
self._data_names = []
self.add_data("rewards", rewards)
self.add_data("observations", observations)
self.add_data("actions", actions)
for name, value in data.items():
self.add_data(name, value)
if done is None:
done = np.zeros(self.length, dtype=bool)
if complete and continuous:
done[-1] = True
else:
done = np.asarray(done, dtype=bool)
assert done.shape[0] == self.length
self.done = done
# Compute rollout bounds from done list (yes this is not exactly safe...)
# The bounds list has one extra entry 0, this simplifies queries greatly.
# bounds[i] = start of rollout i; bounds[i+1]=end of rollout i
if continuous:
if rollout_bounds is None:
rollout_bounds = [0]
rollout_bounds.extend(np.flatnonzero(done) + 1)
if not done[-1]:
rollout_bounds.append(self.length)
else:
# Validate externally passed bounds.
for i in range(len(rollout_bounds) - 1):
assert rollout_bounds[i] < rollout_bounds[i + 1]
assert rollout_bounds[0] == 0
assert rollout_bounds[-1] == self.length
self._rollout_bounds = np.array(rollout_bounds)
else:
self._rollout_bounds = None
@property
def data_format(self) -> str:
return self._data_format
@property
def data_names(self) -> Sequence[str]:
return self._data_names
@property
def rollout_bounds(self) -> np.ndarray:
return self._rollout_bounds
@property
def rollout_count(self):
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
return len(self._rollout_bounds) - 1
@property
def rollout_lengths(self):
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
return bounds[1:] - bounds[:-1]
def __len__(self):
return self.length
def __getitem__(self, index):
if isinstance(index, slice) or isinstance(index, Iterable):
# Return a StepSequence object with the subset. Build sliced data dict.
sliced_data = {name: self._slice_entry(self.__dict__[name], index) for name in self._data_names}
sliced_data = {k: v for k, v in sliced_data.items() if v is not None}
# Check if the slice is continuous
continuous = isinstance(index, slice) and (index.step is None or index.step == 1)
rollout_bounds = None
if continuous:
# Slice rollout bounds too.
start, end, _ = index.indices(self.length)
rollout_bounds = [0]
for b in self._rollout_bounds:
if start < b < end:
rollout_bounds.append(b - start)
rollout_bounds.append(end - start)
return StepSequence(
rollout_info=self.rollout_info,
data_format=self._data_format,
done=self.done[index],
continuous=continuous,
rollout_bounds=rollout_bounds,
**sliced_data,
)
# Should be a singular element index. Return step proxy.
return Step(self, _index_to_int(index, self.length))
def __map_tensors(self, mapper, elem):
if isinstance(elem, dict):
# Modify dict in-place
for k in elem.keys():
elem[k] = self.__map_tensors(mapper, elem[k])
return elem
if isinstance(elem, tuple):
# Can't modify in place since it's a tuple
return new_tuple(type(elem), (self.__map_tensors(mapper, part) for part in elem))
# Tensor element
return mapper(elem)
def _validate_data_size(self, name, value):
# In torch case: check that we don't mess with gradients
if isinstance(value, to.Tensor):
assert not value.requires_grad, (
"Do not add gradient-sensitive tensors to SampleCollections. "
"This is a fast road to weird retain_graph errors!"
)
if isinstance(value, dict):
for k, v in value.items():
self._validate_data_size(f"{name}.{k}", v)
return
if isinstance(value, tuple):
for i, v in enumerate(value):
self._validate_data_size(f"{name}[{i}]", v)
return
if isinstance(value, (np.ndarray, to.Tensor)):
vlen = value.shape[0]
else:
assert isinstance(value, Sequence)
vlen = len(value)
if self.continuous:
if not (vlen == self.length or vlen == self.length + 1):
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} or {self.length}+1 elements, "
f"but has {vlen} elements."
)
else:
if not vlen == self.length:
raise pyrado.ShapeErr(
msg=f"The data list {name} must have {self.length} elements, " f"but has {vlen} elements."
)
def _slice_entry(self, entry, index: slice):
if isinstance(entry, dict):
return {k: self._slice_entry(v, index) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._slice_entry(e, index) for e in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
return entry[index, ...]
elif isinstance(entry, list):
return entry[index]
else:
return None
def _truncate_after_last(self, entry):
if isinstance(entry, dict):
return {k: self._truncate_after_last(v) for k, v in entry.items()}
if isinstance(entry, tuple):
return new_tuple(type(entry), (self._truncate_after_last(v) for v in entry))
elif isinstance(entry, (to.Tensor, np.ndarray)):
if entry.shape[0] == self.length + 1:
return entry[:-1, ...]
elif isinstance(entry, list):
if len(entry) == self.length + 1:
return entry[:-1]
return entry
def add_data(self, name: str, value=None, item_shape: tuple = None, with_after_last: Optional[bool] = False):
if name in self._data_names:
raise pyrado.KeyErr(msg=f"Trying to add a duplicate data field for {name}!")
if value is None:
ro_length = self.length
if with_after_last:
ro_length += 1
if self._data_format == "torch":
value = to.zeros(to.Size([ro_length]) + to.Size(item_shape))
else:
value = np.zeros((ro_length,) + item_shape)
else:
self._validate_data_size(name, value)
if not isinstance(value, (np.ndarray, to.Tensor)):
value = stack_to_format(value, self._data_format)
else:
value = to_format(value, self._data_format)
self._data_names.append(name)
self.__dict__[name] = value
def get_data_values(self, name: str, truncate_last: Optional[bool] = False):
assert name in self._data_names
entry = self.__dict__[name]
if truncate_last:
entry = self._truncate_after_last(entry)
return entry
def numpy(self, data_type=None):
self.convert("numpy", data_type)
def torch(self, data_type=None):
self.convert("torch", data_type)
def convert(self, data_format: str, data_type=None):
if data_format not in {"torch", "numpy"}:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
if self._data_format == data_format:
return
self._data_format = data_format
for dn in self._data_names:
self.__dict__[dn] = self.__map_tensors(lambda t: to_format(t, data_format, data_type), self.__dict__[dn])
def get_rollout(self, index):
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
if isinstance(index, slice):
start, end, step = index.indices(self.rollout_count)
if step == 1:
bounds = self._rollout_bounds
start_step = bounds[start]
end_step = bounds[end]
return self[start_step:end_step]
index = range(start, end, step)
if isinstance(index, Iterable):
return StepSequence.concat([self.get_rollout(i) for i in index], self.data_format)
index = _index_to_int(index, self.rollout_count)
bounds = self._rollout_bounds
start_step = bounds[index]
end_step = bounds[index + 1]
return self[start_step:end_step]
def iterate_rollouts(self):
if not self.continuous:
raise pyrado.ValueErr(msg="Sub-rollouts are only supported on continuous data.")
bounds = self._rollout_bounds
count = len(bounds) - 1
if count == 1:
yield self
else:
for i in range(count):
start_step = bounds[i]
end_step = bounds[i + 1]
yield self[start_step:end_step]
def sample_w_next(self, batch_size: int) -> tuple:
if not self.length >= 2:
raise pyrado.ValueErr(given=self.length, ge_constraint="2")
shuffled_idcs = random.sample(range(self.length - 2), batch_size)
shuffled_next_idcs = [i + 1 for i in shuffled_idcs]
steps = deepcopy(self[shuffled_idcs])
next_steps = deepcopy(self[shuffled_next_idcs])
return steps, next_steps
def split_ordered_batches(self, batch_size: int = None, num_batches: int = None):
if batch_size is None and num_batches is None or batch_size is not None and num_batches is not None:
raise pyrado.ValueErr(msg="Either batch_size or num_batches must not be None, but not both or none!")
elif batch_size is not None and batch_size < 1:
raise pyrado.ValueErr(given=batch_size, ge_constraint="1 (int)")
elif num_batches is not None and num_batches < 1:
raise pyrado.ValueErr(given=num_batches, ge_constraint="1 (int)")
if num_batches is not None:
batch_size = ceil(self.length / num_batches)
if batch_size >= self.length:
yield self
else:
for b in gen_ordered_batch_idcs(batch_size, self.length, sorted=True):
yield self[b]
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool] = False):
if batch_size >= self.length:
yield self
elif complete_rollouts and self.continuous:
rollout_lengths = self.rollout_lengths
shuffled_idcs = random.sample(range(len(rollout_lengths)), len(rollout_lengths))
batch = []
cur_batch_size = 0
for idx in shuffled_idcs:
batch.append(idx)
cur_batch_size += rollout_lengths[idx]
if cur_batch_size >= batch_size:
yield self.get_rollout(batch)
batch.clear()
cur_batch_size = 0
if batch:
yield self.get_rollout(batch)
else:
for b in gen_shuffled_batch_idcs(batch_size, self.length):
yield self[b]
def undiscounted_return(self) -> float:
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
return self.rewards.sum()
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
if not len(self._rollout_bounds) == 2:
raise pyrado.ShapeErr(msg="The StepSequence must be a single continuous rollout.")
if not 0 <= gamma <= 1:
raise pyrado.ValueErr(given=gamma, ge_constraint="0", le_constraint="1")
if self.data_format == "torch":
return to.dot(self.rewards, (gamma ** to.arange(self.length)))
else:
return np.dot(self.rewards, (gamma ** np.arange(self.length)))
@classmethod
def concat(
cls, parts: Sequence["StepSequence"], data_format: Optional[str] = None, truncate_last: Optional[bool] = True
):
data_names = parts[0].data_names
if data_format is None:
data_format = parts[0].data_format
data = {
name: cat_to_format([ro.get_data_values(name, truncate_last) for ro in parts], data_format)
for name in data_names
}
done = np.concatenate([ro.done for ro in parts])
continuous = all(ro.continuous for ro in parts)
rollout_bounds = None
if continuous:
rollout_bounds = [0]
acc_len = 0
for ro in parts:
rollout_bounds.extend(ro.rollout_bounds[1:] + acc_len)
acc_len += ro.rollout_bounds[-1]
return StepSequence(
data_format=data_format, done=done, continuous=continuous, rollout_bounds=rollout_bounds, **data
)
@classmethod
def process_data(
cls,
rollout: "StepSequence",
fcn: Callable,
fcn_arg_name: str,
fcn_arg_types: Union[type, Tuple[type]] = np.ndarray,
include_fields: Sequence[str] = None,
exclude_fields: Sequence[str] = None,
**process_fcn_kwargs,
):
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
inp = fcn(**kwargs)
elif isinstance(inp, dict):
for key, value in inp.items():
if isinstance(value, fcn_arg_types):
inp[key] = recursive_wrapper(value, **kwargs)
else:
inp[key] = value
elif isinstance(inp, list):
for idx, item in enumerate(inp):
if isinstance(item, fcn_arg_types):
inp[idx] = recursive_wrapper(item, **kwargs)
else:
inp[idx] = item
return inp
data_dict = dict()
include_fields = include_fields or rollout.data_names
exclude_fields = exclude_fields or []
for name in rollout.data_names:
data = rollout.get_data_values(name)
if name in include_fields and name not in exclude_fields:
data = recursive_wrapper(data, **process_fcn_kwargs)
data_dict[name] = data
return StepSequence(**data_dict, rollout_info=rollout.rollout_info, continuous=rollout.continuous)
def discounted_reverse_cumsum(data, gamma: float):
return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]
def discounted_value(rollout: StepSequence, gamma: float):
rewards = [step.reward for step in rollout]
return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str] = "torch"):
if data_format == "torch":
return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts])
elif data_format == "numpy":
return np.array([discounted_value(ro, gamma) for ro in rollouts])
else:
raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
def gae_returns(rollout: StepSequence, gamma: float = 0.99, lamb: float = 0.95):
def _next_value(step: Step) -> float:
if step.done:
return 0.0
return step.next_value
deltas = [step.reward + gamma * _next_value(step) - step.value for step in rollout]
cumsum = discounted_reverse_cumsum(deltas, gamma * lamb)
return cumsum
| true
| true
|
7906c16893776fa48ba33f24ccd5d85b9c43c67e
| 1,750
|
py
|
Python
|
openpeerpower/components/iaqualink/sensor.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
openpeerpower/components/iaqualink/sensor.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
openpeerpower/components/iaqualink/sensor.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Aqualink temperature sensors."""
from __future__ import annotations
from openpeerpower.components.sensor import DOMAIN, SensorEntity
from openpeerpower.config_entries import ConfigEntry
from openpeerpower.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from openpeerpower.core import OpenPeerPower
from . import AqualinkEntity
from .const import DOMAIN as AQUALINK_DOMAIN
PARALLEL_UPDATES = 0
async def async_setup_entry(
opp: OpenPeerPower, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up discovered sensors."""
devs = []
for dev in opp.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(OppAqualinkSensor(dev))
async_add_entities(devs, True)
class OppAqualinkSensor(AqualinkEntity, SensorEntity):
"""Representation of a sensor."""
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self.dev.label
@property
def unit_of_measurement(self) -> str | None:
"""Return the measurement unit for the sensor."""
if self.dev.name.endswith("_temp"):
if self.dev.system.temp_unit == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
return None
@property
def state(self) -> str | None:
"""Return the state of the sensor."""
if self.dev.state == "":
return None
try:
state = int(self.dev.state)
except ValueError:
state = float(self.dev.state)
return state
@property
def device_class(self) -> str | None:
"""Return the class of the sensor."""
if self.dev.name.endswith("_temp"):
return DEVICE_CLASS_TEMPERATURE
return None
| 29.166667
| 87
| 0.662286
|
from __future__ import annotations
from openpeerpower.components.sensor import DOMAIN, SensorEntity
from openpeerpower.config_entries import ConfigEntry
from openpeerpower.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from openpeerpower.core import OpenPeerPower
from . import AqualinkEntity
from .const import DOMAIN as AQUALINK_DOMAIN
PARALLEL_UPDATES = 0
async def async_setup_entry(
opp: OpenPeerPower, config_entry: ConfigEntry, async_add_entities
) -> None:
devs = []
for dev in opp.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(OppAqualinkSensor(dev))
async_add_entities(devs, True)
class OppAqualinkSensor(AqualinkEntity, SensorEntity):
@property
def name(self) -> str:
return self.dev.label
@property
def unit_of_measurement(self) -> str | None:
if self.dev.name.endswith("_temp"):
if self.dev.system.temp_unit == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
return None
@property
def state(self) -> str | None:
if self.dev.state == "":
return None
try:
state = int(self.dev.state)
except ValueError:
state = float(self.dev.state)
return state
@property
def device_class(self) -> str | None:
if self.dev.name.endswith("_temp"):
return DEVICE_CLASS_TEMPERATURE
return None
| true
| true
|
7906c26528c0471f76655970ecdc1728764aaf49
| 1,959
|
py
|
Python
|
opencolorio_config_aces/config/__init__.py
|
michdolan/OpenColorIO-Config-ACES
|
5216c2a184e03529557993b7dc670d351aadddc7
|
[
"BSD-3-Clause"
] | null | null | null |
opencolorio_config_aces/config/__init__.py
|
michdolan/OpenColorIO-Config-ACES
|
5216c2a184e03529557993b7dc670d351aadddc7
|
[
"BSD-3-Clause"
] | null | null | null |
opencolorio_config_aces/config/__init__.py
|
michdolan/OpenColorIO-Config-ACES
|
5216c2a184e03529557993b7dc670d351aadddc7
|
[
"BSD-3-Clause"
] | null | null | null |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
from .generation import (
TRANSFORM_FACTORIES,
colorspace_factory,
group_transform_factory,
look_factory,
named_transform_factory,
produce_transform,
transform_factory,
transform_factory_clf_transform_to_group_transform,
transform_factory_default,
view_transform_factory,
)
from .generation import (
ConfigData,
VersionData,
deserialize_config_data,
generate_config,
serialize_config_data,
validate_config,
)
from .reference import (
build_aces_conversion_graph,
classify_aces_ctl_transforms,
conversion_path,
ctl_transform_to_node,
discover_aces_ctl_transforms,
filter_ctl_transforms,
filter_nodes,
node_to_ctl_transform,
plot_aces_conversion_graph,
print_aces_taxonomy,
unclassify_ctl_transforms,
)
from .reference import (
ColorspaceDescriptionStyle,
generate_config_aces,
)
from .cg import generate_config_cg
__all__ = [
"TRANSFORM_FACTORIES",
"colorspace_factory",
"group_transform_factory",
"look_factory",
"named_transform_factory",
"produce_transform",
"transform_factory",
"transform_factory_clf_transform_to_group_transform",
"transform_factory_default",
"view_transform_factory",
]
__all__ += [
"ConfigData",
"VersionData",
"deserialize_config_data",
"generate_config",
"serialize_config_data",
"validate_config",
]
__all__ += [
"build_aces_conversion_graph",
"classify_aces_ctl_transforms",
"conversion_path",
"ctl_transform_to_node",
"discover_aces_ctl_transforms",
"filter_ctl_transforms",
"filter_nodes",
"node_to_ctl_transform",
"plot_aces_conversion_graph",
"print_aces_taxonomy",
"unclassify_ctl_transforms",
]
__all__ += [
"ColorspaceDescriptionStyle",
"generate_config_aces",
]
__all__ += ["generate_config_cg"]
| 24.185185
| 57
| 0.743236
|
from .generation import (
TRANSFORM_FACTORIES,
colorspace_factory,
group_transform_factory,
look_factory,
named_transform_factory,
produce_transform,
transform_factory,
transform_factory_clf_transform_to_group_transform,
transform_factory_default,
view_transform_factory,
)
from .generation import (
ConfigData,
VersionData,
deserialize_config_data,
generate_config,
serialize_config_data,
validate_config,
)
from .reference import (
build_aces_conversion_graph,
classify_aces_ctl_transforms,
conversion_path,
ctl_transform_to_node,
discover_aces_ctl_transforms,
filter_ctl_transforms,
filter_nodes,
node_to_ctl_transform,
plot_aces_conversion_graph,
print_aces_taxonomy,
unclassify_ctl_transforms,
)
from .reference import (
ColorspaceDescriptionStyle,
generate_config_aces,
)
from .cg import generate_config_cg
__all__ = [
"TRANSFORM_FACTORIES",
"colorspace_factory",
"group_transform_factory",
"look_factory",
"named_transform_factory",
"produce_transform",
"transform_factory",
"transform_factory_clf_transform_to_group_transform",
"transform_factory_default",
"view_transform_factory",
]
__all__ += [
"ConfigData",
"VersionData",
"deserialize_config_data",
"generate_config",
"serialize_config_data",
"validate_config",
]
__all__ += [
"build_aces_conversion_graph",
"classify_aces_ctl_transforms",
"conversion_path",
"ctl_transform_to_node",
"discover_aces_ctl_transforms",
"filter_ctl_transforms",
"filter_nodes",
"node_to_ctl_transform",
"plot_aces_conversion_graph",
"print_aces_taxonomy",
"unclassify_ctl_transforms",
]
__all__ += [
"ColorspaceDescriptionStyle",
"generate_config_aces",
]
__all__ += ["generate_config_cg"]
| true
| true
|
7906c47548dd1461c3e11d59e7081c666276c220
| 1,952
|
py
|
Python
|
test_app/settings.py
|
AngelKey/Angelkey.proofintegration
|
e71228a991df342afc3159defbf0ea71a723a98d
|
[
"BSD-3-Clause"
] | 17
|
2019-04-01T14:35:42.000Z
|
2021-06-23T01:59:44.000Z
|
test_app/settings.py
|
AngelKey/Angelkey.proofintegration
|
e71228a991df342afc3159defbf0ea71a723a98d
|
[
"BSD-3-Clause"
] | 2
|
2018-10-26T14:34:55.000Z
|
2019-04-26T13:51:10.000Z
|
test_app/settings.py
|
AngelKey/Angelkey.proofintegration
|
e71228a991df342afc3159defbf0ea71a723a98d
|
[
"BSD-3-Clause"
] | 7
|
2019-04-23T14:28:18.000Z
|
2021-11-13T02:57:42.000Z
|
# The most basic of settings to get the app to run as an example, should *never* be used in a
# production environment.
import os
import dj_database_url
DATABASES = {}
db_url = os.environ.get('DATABASE_URL', '')
if db_url:
DATABASES['default'] = dj_database_url.parse(db_url, conn_max_age=600, ssl_require=True)
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dr.sqlite3',
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.messages',
'keybase_proofs',
'test_app',
)
DEBUG = True
ALLOWED_HOSTS = ['*']
SECRET_KEY = '_'
SITE_ID = 1
ROOT_URLCONF = 'test_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.app_directories.Loader',
],
},
},
]
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Must match the `domain` set in the config.
KEYBASE_PROOFS_DOMAIN = '<your-domain.com>'
| 27.885714
| 93
| 0.653176
|
import os
import dj_database_url
DATABASES = {}
db_url = os.environ.get('DATABASE_URL', '')
if db_url:
DATABASES['default'] = dj_database_url.parse(db_url, conn_max_age=600, ssl_require=True)
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dr.sqlite3',
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.messages',
'keybase_proofs',
'test_app',
)
DEBUG = True
ALLOWED_HOSTS = ['*']
SECRET_KEY = '_'
SITE_ID = 1
ROOT_URLCONF = 'test_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.app_directories.Loader',
],
},
},
]
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
KEYBASE_PROOFS_DOMAIN = '<your-domain.com>'
| true
| true
|
7906c546bcdfd796db7b12cb7bbc4853a66df11f
| 398
|
py
|
Python
|
packages/ml_api/tests/conftest.py
|
iameminmammadov/datacube_bigmart_lighthouse
|
74de8e87bc0482845530c23871d1b113acc11a81
|
[
"MIT"
] | null | null | null |
packages/ml_api/tests/conftest.py
|
iameminmammadov/datacube_bigmart_lighthouse
|
74de8e87bc0482845530c23871d1b113acc11a81
|
[
"MIT"
] | null | null | null |
packages/ml_api/tests/conftest.py
|
iameminmammadov/datacube_bigmart_lighthouse
|
74de8e87bc0482845530c23871d1b113acc11a81
|
[
"MIT"
] | null | null | null |
import pytest
from ml_api.api.app import create_app
from ml_api.api.config import TestingConfig
# Fixtures provide an easy way to set up and tear down resources
@pytest.fixture
def app():
app = create_app(config_object=TestingConfig)
with app.app_context():
yield app
@pytest.fixture
def flask_test_client(app):
with app.test_client() as test_client:
yield test_client
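# A minimal example test built on these fixtures could look like this (the endpoint
# name is hypothetical and depends on the routes exposed by create_app()):
#
# def test_health_endpoint(flask_test_client):
#     response = flask_test_client.get('/health')
#     assert response.status_code == 200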
| 22.111111
| 61
| 0.753769
|
import pytest
from ml_api.api.app import create_app
from ml_api.api.config import TestingConfig
@pytest.fixture
def app():
app = create_app(config_object=TestingConfig)
with app.app_context():
yield app
@pytest.fixture
def flask_test_client(app):
with app.test_client() as test_client:
yield test_client
| true
| true
|
7906c6207fce56a30524aafb61f45614f85552a6
| 4,380
|
py
|
Python
|
models/image_segmentation/tensorflow/3d_unet_mlperf/inference/nnUNet/preprocess.py
|
ashahba/models
|
c08d1ea02083814d3a31f9695c5bbf5c7704a8a7
|
[
"Apache-2.0"
] | 357
|
2019-01-23T23:54:30.000Z
|
2022-03-31T05:32:25.000Z
|
models/image_segmentation/tensorflow/3d_unet_mlperf/inference/nnUNet/preprocess.py
|
ashahba/models
|
c08d1ea02083814d3a31f9695c5bbf5c7704a8a7
|
[
"Apache-2.0"
] | 65
|
2019-02-06T15:35:35.000Z
|
2022-03-25T09:56:48.000Z
|
models/image_segmentation/tensorflow/3d_unet_mlperf/inference/nnUNet/preprocess.py
|
ashahba/models
|
c08d1ea02083814d3a31f9695c5bbf5c7704a8a7
|
[
"Apache-2.0"
] | 164
|
2019-02-06T15:05:57.000Z
|
2022-03-31T11:48:14.000Z
|
# coding=utf-8
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file has been copied from
# https://github.com/mlcommons/inference/blob/r0.7/vision/medical_imaging/3d-unet/preprocess.py
import argparse
import numpy as np
import os
import pickle
import sys
import torch
from batchgenerators.augmentations.utils import pad_nd_image
from batchgenerators.utilities.file_and_folder_operations import subfiles
from nnunet.training.model_restore import load_model_and_checkpoint_files
from nnunet.inference.predict import preprocess_multithreaded
def preprocess_MLPerf(model, checkpoint_name, folds, fp16, list_of_lists, output_filenames, preprocessing_folder, num_threads_preprocessing):
assert len(list_of_lists) == len(output_filenames)
print("loading parameters for folds", folds)
trainer, params = load_model_and_checkpoint_files(model, folds, fp16, checkpoint_name=checkpoint_name)
print("starting preprocessing generator")
preprocessing = preprocess_multithreaded(trainer, list_of_lists, output_filenames, num_threads_preprocessing, None)
print("Preprocessing images...")
all_output_files = []
for preprocessed in preprocessing:
output_filename, (d, dct) = preprocessed
all_output_files.append(output_filename)
if isinstance(d, str):
data = np.load(d)
os.remove(d)
d = data
# Pad to the desired full volume
d = pad_nd_image(d, trainer.patch_size, "constant", None, False, None)
with open(os.path.join(preprocessing_folder, output_filename+ ".pkl"), "wb") as f:
pickle.dump([d, dct], f)
f.close()
return all_output_files
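# Note: each preprocessed case written above can later be restored with, e.g.:
#   with open(os.path.join(preprocessing_folder, output_filename + ".pkl"), "rb") as f:
#       d, dct = pickle.load(f)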
def preprocess_setup(preprocessed_data_dir):
print("Preparing for preprocessing data...")
# Validation set is fold 1
fold = 1
validation_fold_file = '../models/image_segmentation/tensorflow/3d_unet_mlperf/inference/nnUNet/folds/fold1_validation.txt'
# Make sure the model exists
model_dir = 'build/result/nnUNet/3d_fullres/Task043_BraTS2019/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1'
model_path = os.path.join(model_dir, "plans.pkl")
assert os.path.isfile(model_path), "Cannot find the model file {:}!".format(model_path)
checkpoint_name = "model_final_checkpoint"
# Other settings
fp16 = False
num_threads_preprocessing = 12
raw_data_dir = 'build/raw_data/nnUNet_raw_data/Task043_BraTS2019/imagesTr'
# Open list containing validation images from specific fold (e.g. 1)
validation_files = []
with open(validation_fold_file) as f:
for line in f:
validation_files.append(line.rstrip())
# Create output and preprocessed directory
if not os.path.isdir(preprocessed_data_dir):
os.makedirs(preprocessed_data_dir)
# Create list of images locations (i.e. 4 images per case => 4 modalities)
all_files = subfiles(raw_data_dir, suffix=".nii.gz", join=False, sort=True)
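# Keep only the files belonging to a validation case j, i.e. names consisting of j plus a
# 12-character suffix (presumably the 4-digit modality index plus ".nii.gz", e.g. "_0000.nii.gz")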
list_of_lists = [[os.path.join(raw_data_dir, i) for i in all_files if i[:len(j)].startswith(j) and
len(i) == (len(j) + 12)] for j in validation_files]
# Preprocess images, returns filenames list
# This runs in multiprocess
print("Actually preprocessing data...")
preprocessed_files = preprocess_MLPerf(model_dir, checkpoint_name, fold, fp16, list_of_lists,
validation_files, preprocessed_data_dir, num_threads_preprocessing)
print("Saving metadata of the preprocessed data...")
with open(os.path.join(preprocessed_data_dir, "preprocessed_files.pkl"), "wb") as f:
pickle.dump(preprocessed_files, f)
print("Preprocessed data saved to {:}".format(preprocessed_data_dir))
print("Done!")
| 40.934579
| 141
| 0.736073
|
import argparse
import numpy as np
import os
import pickle
import sys
import torch
from batchgenerators.augmentations.utils import pad_nd_image
from batchgenerators.utilities.file_and_folder_operations import subfiles
from nnunet.training.model_restore import load_model_and_checkpoint_files
from nnunet.inference.predict import preprocess_multithreaded
def preprocess_MLPerf(model, checkpoint_name, folds, fp16, list_of_lists, output_filenames, preprocessing_folder, num_threads_preprocessing):
assert len(list_of_lists) == len(output_filenames)
print("loading parameters for folds", folds)
trainer, params = load_model_and_checkpoint_files(model, folds, fp16, checkpoint_name=checkpoint_name)
print("starting preprocessing generator")
preprocessing = preprocess_multithreaded(trainer, list_of_lists, output_filenames, num_threads_preprocessing, None)
print("Preprocessing images...")
all_output_files = []
for preprocessed in preprocessing:
output_filename, (d, dct) = preprocessed
all_output_files.append(output_filename)
if isinstance(d, str):
data = np.load(d)
os.remove(d)
d = data
d = pad_nd_image(d, trainer.patch_size, "constant", None, False, None)
with open(os.path.join(preprocessing_folder, output_filename+ ".pkl"), "wb") as f:
pickle.dump([d, dct], f)
f.close()
return all_output_files
def preprocess_setup(preprocessed_data_dir):
print("Preparing for preprocessing data...")
fold = 1
validation_fold_file = '../models/image_segmentation/tensorflow/3d_unet_mlperf/inference/nnUNet/folds/fold1_validation.txt'
model_dir = 'build/result/nnUNet/3d_fullres/Task043_BraTS2019/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1'
model_path = os.path.join(model_dir, "plans.pkl")
assert os.path.isfile(model_path), "Cannot find the model file {:}!".format(model_path)
checkpoint_name = "model_final_checkpoint"
fp16 = False
num_threads_preprocessing = 12
raw_data_dir = 'build/raw_data/nnUNet_raw_data/Task043_BraTS2019/imagesTr'
validation_files = []
with open(validation_fold_file) as f:
for line in f:
validation_files.append(line.rstrip())
if not os.path.isdir(preprocessed_data_dir):
os.makedirs(preprocessed_data_dir)
all_files = subfiles(raw_data_dir, suffix=".nii.gz", join=False, sort=True)
list_of_lists = [[os.path.join(raw_data_dir, i) for i in all_files if i[:len(j)].startswith(j) and
len(i) == (len(j) + 12)] for j in validation_files]
print("Acually preprocessing data...")
preprocessed_files = preprocess_MLPerf(model_dir, checkpoint_name, fold, fp16, list_of_lists,
validation_files, preprocessed_data_dir, num_threads_preprocessing)
print("Saving metadata of the preprocessed data...")
with open(os.path.join(preprocessed_data_dir, "preprocessed_files.pkl"), "wb") as f:
pickle.dump(preprocessed_files, f)
print("Preprocessed data saved to {:}".format(preprocessed_data_dir))
print("Done!")
| true
| true
|
7906c755afa7bc6dae1bb1fff9408b9892eb8f80
| 5,929
|
py
|
Python
|
benchmark.py
|
nickjmiller/MLBenchmark
|
f6c3865c2dd5b71a471789041f3d800705371531
|
[
"MIT"
] | null | null | null |
benchmark.py
|
nickjmiller/MLBenchmark
|
f6c3865c2dd5b71a471789041f3d800705371531
|
[
"MIT"
] | null | null | null |
benchmark.py
|
nickjmiller/MLBenchmark
|
f6c3865c2dd5b71a471789041f3d800705371531
|
[
"MIT"
] | null | null | null |
import csv
from default_clf import DefaultNSL
from itertools import chain
from time import process_time
import numpy as np
import pandas as pd
NUM_PASSES = 100
NUM_ACC_PASSES = 50
TRAIN_PATH = 'data/KDDTrain+.csv'
TEST_PATH = 'data/KDDTest+.csv'
ATTACKS = {
'normal': 'normal',
'back': 'DoS',
'land': 'DoS',
'neptune': 'DoS',
'pod': 'DoS',
'smurf': 'DoS',
'teardrop': 'DoS',
'mailbomb': 'DoS',
'apache2': 'DoS',
'processtable': 'DoS',
'udpstorm': 'DoS',
'ipsweep': 'Probe',
'nmap': 'Probe',
'portsweep': 'Probe',
'satan': 'Probe',
'mscan': 'Probe',
'saint': 'Probe',
'ftp_write': 'R2L',
'guess_passwd': 'R2L',
'imap': 'R2L',
'multihop': 'R2L',
'phf': 'R2L',
'spy': 'R2L',
'warezclient': 'R2L',
'warezmaster': 'R2L',
'sendmail': 'R2L',
'named': 'R2L',
'snmpgetattack': 'R2L',
'snmpguess': 'R2L',
'xlock': 'R2L',
'xsnoop': 'R2L',
'worm': 'R2L',
'buffer_overflow': 'U2R',
'loadmodule': 'U2R',
'perl': 'U2R',
'rootkit': 'U2R',
'httptunnel': 'U2R',
'ps': 'U2R',
'sqlattack': 'U2R',
'xterm': 'U2R'
}
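# Read the laptop battery's current charge from sysfs; the before/after difference is used
# as a rough measure of energy consumed during a benchmark pass.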
def get_current_charge():
try:
with open('/sys/class/power_supply/BAT0/charge_now') as f:
return int(f.readline())
except IOError:
print("Cannot find current battery charge.")
return 0
def check_load_training(clf, path):
start = process_time()
clf.load_training_data(path)
end = process_time()
return end - start
def check_load_testing(clf, path):
start = process_time()
clf.load_test_data(path)
end = process_time()
return end - start
def check_training(clf):
start = process_time()
clf.train_clf()
end = process_time()
return end - start
def check_testing_entire_dataset(clf, train=False):
start = process_time()
clf.test_clf(train)
end = process_time()
return end - start
def check_predict_row(clf, row):
start = process_time()
clf.predict(row)
end = process_time()
return end - start
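# Run `function` NUM_PASSES times, filling `arr` with per-pass durations;
# returns [mean time, std, battery charge consumed].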
def get_stats(arr, function, *args, **kwargs):
charge_start = get_current_charge()
for i in range(NUM_PASSES):
arr[i] = function(*args, **kwargs)
charge_end = get_current_charge()
mean = arr.mean()
std = arr.std()
return [mean, std, (charge_start - charge_end)]
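# Benchmark each stage of the classifier (loading data, training, testing the dataset
# and a single row) and append the timing/power results to results.csv.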
def evaluate_power(clf):
res = np.empty(shape=(NUM_PASSES, 1))
load_train = get_stats(res, check_load_training, clf, TRAIN_PATH)
print('Loading Training: ', load_train)
load_test = get_stats(res, check_load_testing, clf, TEST_PATH)
print('Loading Testing: ', load_test)
train = get_stats(res, check_training, clf)
print('Training: ', train)
test_dataset = get_stats(res, check_testing_entire_dataset, clf)
print('Testing dataset: ', test_dataset)
row = clf.testing[0].iloc[0].values.reshape(1, -1)
test_row = get_stats(res, check_predict_row, clf, row)
print('Testing one row: ', test_row)
with open('results.csv', 'a', newline='') as csvf:
csv_writer = csv.writer(csvf)
csv_writer.writerow([clf.__class__.__name__, 'Number of Passes:', NUM_PASSES, 'Power'])
csv_writer.writerow(['Function', 'Time (s) Mean', 'Time Std',
'Total Power (microwatt-hour)'])
csv_writer.writerow(['Loading Training Data'] + load_train)
csv_writer.writerow(['Loading Testing Data'] + load_test)
csv_writer.writerow(['Training Classifier'] + train)
csv_writer.writerow(['Testing Dataset'] + test_dataset)
csv_writer.writerow(['Testing One Row'] + test_row)
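# Train and test the classifier NUM_ACC_PASSES times with reshuffled training data,
# tracking overall accuracy and per-category detection counts.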
def evaluate_accuracy(clf):
acc = np.empty(shape=(NUM_ACC_PASSES, 1))
clf.load_training_data(TRAIN_PATH)
clf.load_test_data(TEST_PATH)
cat_labels = clf.testing[1].apply(lambda x: ATTACKS[x])
cats = {'U2R':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'DoS':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'R2L':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'Probe':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'normal':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))]}
for i in range(0, NUM_ACC_PASSES):
clf.train_clf()
preds, acc[i] = clf.test_clf()
for cat, pred in zip(cat_labels, preds):
cats[cat][pred == 'normal'][i] += 1
clf.shuffle_training_data()
conf = calculate_category_accuracy(cats)
mean = acc.mean()
std = acc.std()
write_acc_to_csv([mean, std], cats, conf, clf.__class__.__name__)
return [mean, std]
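# Collapse per-category correct/wrong counts into mean/std accuracy and
# accumulate TP/TN/FP/FN confusion-matrix totals.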
def calculate_category_accuracy(cats):
conf = {'TN':np.zeros(shape=(NUM_ACC_PASSES, 1)), 'TP':np.zeros(shape=(NUM_ACC_PASSES, 1)),
'FN':np.zeros(shape=(NUM_ACC_PASSES, 1)), 'FP':np.zeros(shape=(NUM_ACC_PASSES, 1))}
for key, values in cats.items():
correct = values[0]
wrong = values[1]
if key == 'normal':
correct, wrong = wrong, correct
conf['TN'] += correct
conf['FP'] += wrong
else:
conf['TP'] += correct
conf['FN'] += wrong
avg = correct/(correct+wrong)
cats[key] = [avg.mean(), avg.std()]
return conf
def write_acc_to_csv(acc, cats, conf, name):
with open('results.csv', 'a', newline='') as csvf:
csv_writer = csv.writer(csvf)
csv_writer.writerow([name, 'Number of Passes:', NUM_ACC_PASSES, 'Accuracy'])
csv_writer.writerow(['Statistic', 'Mean', 'STD'])
csv_writer.writerow(['Accuracy'] + acc)
for key, values in cats.items():
csv_writer.writerow([key] + values)
for key, values in conf.items():
csv_writer.writerow([key, values.mean(), values.std()])
| 30.880208
| 96
| 0.611908
|
import csv
from default_clf import DefaultNSL
from itertools import chain
from time import process_time
import numpy as np
import pandas as pd
NUM_PASSES = 100
NUM_ACC_PASSES = 50
TRAIN_PATH = 'data/KDDTrain+.csv'
TEST_PATH = 'data/KDDTest+.csv'
ATTACKS = {
'normal': 'normal',
'back': 'DoS',
'land': 'DoS',
'neptune': 'DoS',
'pod': 'DoS',
'smurf': 'DoS',
'teardrop': 'DoS',
'mailbomb': 'DoS',
'apache2': 'DoS',
'processtable': 'DoS',
'udpstorm': 'DoS',
'ipsweep': 'Probe',
'nmap': 'Probe',
'portsweep': 'Probe',
'satan': 'Probe',
'mscan': 'Probe',
'saint': 'Probe',
'ftp_write': 'R2L',
'guess_passwd': 'R2L',
'imap': 'R2L',
'multihop': 'R2L',
'phf': 'R2L',
'spy': 'R2L',
'warezclient': 'R2L',
'warezmaster': 'R2L',
'sendmail': 'R2L',
'named': 'R2L',
'snmpgetattack': 'R2L',
'snmpguess': 'R2L',
'xlock': 'R2L',
'xsnoop': 'R2L',
'worm': 'R2L',
'buffer_overflow': 'U2R',
'loadmodule': 'U2R',
'perl': 'U2R',
'rootkit': 'U2R',
'httptunnel': 'U2R',
'ps': 'U2R',
'sqlattack': 'U2R',
'xterm': 'U2R'
}
def get_current_charge():
try:
with open('/sys/class/power_supply/BAT0/charge_now') as f:
return int(f.readline())
except IOError:
print("Cannot find current battery charge.")
return 0
def check_load_training(clf, path):
start = process_time()
clf.load_training_data(path)
end = process_time()
return end - start
def check_load_testing(clf, path):
start = process_time()
clf.load_test_data(path)
end = process_time()
return end - start
def check_training(clf):
start = process_time()
clf.train_clf()
end = process_time()
return end - start
def check_testing_entire_dataset(clf, train=False):
start = process_time()
clf.test_clf(train)
end = process_time()
return end - start
def check_predict_row(clf, row):
start = process_time()
clf.predict(row)
end = process_time()
return end - start
def get_stats(arr, function, *args, **kwargs):
charge_start = get_current_charge()
for i in range(NUM_PASSES):
arr[i] = function(*args, **kwargs)
charge_end = get_current_charge()
mean = arr.mean()
std = arr.std()
return [mean, std, (charge_start - charge_end)]
def evaluate_power(clf):
res = np.empty(shape=(NUM_PASSES, 1))
load_train = get_stats(res, check_load_training, clf, TRAIN_PATH)
print('Loading Training: ', load_train)
load_test = get_stats(res, check_load_testing, clf, TEST_PATH)
print('Loading Testing: ', load_test)
train = get_stats(res, check_training, clf)
print('Training: ', train)
test_dataset = get_stats(res, check_testing_entire_dataset, clf)
print('Testing dataset: ', test_dataset)
row = clf.testing[0].iloc[0].values.reshape(1, -1)
test_row = get_stats(res, check_predict_row, clf, row)
print('Testing one row: ', test_row)
with open('results.csv', 'a', newline='') as csvf:
csv_writer = csv.writer(csvf)
csv_writer.writerow([clf.__class__.__name__, 'Number of Passes:', NUM_PASSES, 'Power'])
csv_writer.writerow(['Function', 'Time (s) Mean', 'Time Std',
'Total Power (microwatt-hour)'])
csv_writer.writerow(['Loading Training Data'] + load_train)
csv_writer.writerow(['Loading Testing Data'] + load_test)
csv_writer.writerow(['Training Classifier'] + train)
csv_writer.writerow(['Testing Dataset'] + test_dataset)
csv_writer.writerow(['Testing One Row'] + test_row)
def evaluate_accuracy(clf):
acc = np.empty(shape=(NUM_ACC_PASSES, 1))
clf.load_training_data(TRAIN_PATH)
clf.load_test_data(TEST_PATH)
cat_labels = clf.testing[1].apply(lambda x: ATTACKS[x])
cats = {'U2R':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'DoS':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'R2L':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'Probe':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))],
'normal':[np.zeros(shape=(NUM_ACC_PASSES, 1)), np.zeros(shape=(NUM_ACC_PASSES, 1))]}
for i in range(0, NUM_ACC_PASSES):
clf.train_clf()
preds, acc[i] = clf.test_clf()
for cat, pred in zip(cat_labels, preds):
cats[cat][pred == 'normal'][i] += 1
clf.shuffle_training_data()
conf = calculate_category_accuracy(cats)
mean = acc.mean()
std = acc.std()
write_acc_to_csv([mean, std], cats, conf, clf.__class__.__name__)
return [mean, std]
def calculate_category_accuracy(cats):
conf = {'TN':np.zeros(shape=(NUM_ACC_PASSES, 1)), 'TP':np.zeros(shape=(NUM_ACC_PASSES, 1)),
'FN':np.zeros(shape=(NUM_ACC_PASSES, 1)), 'FP':np.zeros(shape=(NUM_ACC_PASSES, 1))}
for key, values in cats.items():
correct = values[0]
wrong = values[1]
if key == 'normal':
correct, wrong = wrong, correct
conf['TN'] += correct
conf['FP'] += wrong
else:
conf['TP'] += correct
conf['FN'] += wrong
avg = correct/(correct+wrong)
cats[key] = [avg.mean(), avg.std()]
return conf
def write_acc_to_csv(acc, cats, conf, name):
with open('results.csv', 'a', newline='') as csvf:
csv_writer = csv.writer(csvf)
csv_writer.writerow([name, 'Number of Passes:', NUM_ACC_PASSES, 'Accuracy'])
csv_writer.writerow(['Statistic', 'Mean', 'STD'])
csv_writer.writerow(['Accuracy'] + acc)
for key, values in cats.items():
csv_writer.writerow([key] + values)
for key, values in conf.items():
csv_writer.writerow([key, values.mean(), values.std()])
| true
| true
|
7906c8adbbc21e3109745b8fd73b36a82946962a
| 1,778
|
py
|
Python
|
utils/data_loader_2.py
|
Dorky-Lever/vpv
|
0f156b2ad79cbb7060140434e34b5841ab5b1a26
|
[
"Apache-2.0"
] | null | null | null |
utils/data_loader_2.py
|
Dorky-Lever/vpv
|
0f156b2ad79cbb7060140434e34b5841ab5b1a26
|
[
"Apache-2.0"
] | null | null | null |
utils/data_loader_2.py
|
Dorky-Lever/vpv
|
0f156b2ad79cbb7060140434e34b5841ab5b1a26
|
[
"Apache-2.0"
] | null | null | null |
"""
Load volumes into vpv from a toml config file. Just load volumes and no overlays
Examples
--------
Example toml file
orientation = 'sagittal'
[top]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
[bottom]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
"""
import sys
from pathlib import Path
from itertools import chain
import toml
from PyQt5 import QtWidgets
from vpv.vpv import Vpv
from vpv.common import Layers
from typing import Dict
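# Load the volumes listed in the toml config into VPV, assigning them to the top row of views
# and, when configured, the bottom row.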
def load(config: Dict):
top_vols = config['top']['specimens']
bottom = config['bottom']['specimens']
if bottom:
bottom_vols = config['bottom']['specimens']
    else: # We allow only the top view to be visible
bottom_specs = []
bottom_vols = []
bottom_labels = []
    app = QtWidgets.QApplication([])
ex = Vpv()
p2s = lambda x: [str(z) for z in x]
all_vols = top_vols + bottom_vols
ex.load_volumes(chain(p2s(top_vols), p2s(bottom_vols)), 'vol')
# Set the top row of views
for i in range(3):
try:
vol_id = Path(top_vols[i]).stem
ex.views[i].layers[Layers.vol1].set_volume(vol_id)
except IndexError:
continue
if bottom:
        # Set the bottom row of views
for i in range(3):
try:
vol_id = Path(bottom_vols[i]).stem
ex.views[i + 3].layers[Layers.vol1].set_volume(vol_id)
except IndexError:
continue
print('Finished loading')
# Show two rows
ex.data_manager.show2Rows(True if bottom else False)
# Set orientation
ex.data_manager.on_orientation(config['orientation'])
sys.exit(app.exec_())
if __name__ == '__main__':
file_ = sys.argv[1]
config = toml.load(file_)
load(config)
| 19.326087
| 80
| 0.615298
|
import sys
from pathlib import Path
from itertools import chain
import toml
from PyQt5 import QtWidgets
from vpv.vpv import Vpv
from vpv.common import Layers
from typing import Dict
def load(config: Dict):
top_vols = config['top']['specimens']
bottom = config['bottom']['specimens']
if bottom:
bottom_vols = config['bottom']['specimens']
else:
bottom_specs = []
bottom_vols = []
bottom_labels = []
    app = QtWidgets.QApplication([])
ex = Vpv()
p2s = lambda x: [str(z) for z in x]
all_vols = top_vols + bottom_vols
ex.load_volumes(chain(p2s(top_vols), p2s(bottom_vols)), 'vol')
for i in range(3):
try:
vol_id = Path(top_vols[i]).stem
ex.views[i].layers[Layers.vol1].set_volume(vol_id)
except IndexError:
continue
if bottom:
for i in range(3):
try:
vol_id = Path(bottom_vols[i]).stem
ex.views[i + 3].layers[Layers.vol1].set_volume(vol_id)
except IndexError:
continue
print('Finished loading')
ex.data_manager.show2Rows(True if bottom else False)
ex.data_manager.on_orientation(config['orientation'])
sys.exit(app.exec_())
if __name__ == '__main__':
file_ = sys.argv[1]
config = toml.load(file_)
load(config)
| true
| true
|
7906c9506a6dc912a5d82b27e830bdb50e302cea
| 2,249
|
py
|
Python
|
train/train_superpixels_graph_classification.py
|
nfkjsfoeif/AutoGCN
|
4496bc066936d93b2e852c8010d95fb372910a80
|
[
"MIT"
] | 2
|
2020-06-27T15:17:23.000Z
|
2020-09-26T13:23:27.000Z
|
train/train_superpixels_graph_classification.py
|
nfkjsfoeif/AutoGCN
|
4496bc066936d93b2e852c8010d95fb372910a80
|
[
"MIT"
] | null | null | null |
train/train_superpixels_graph_classification.py
|
nfkjsfoeif/AutoGCN
|
4496bc066936d93b2e852c8010d95fb372910a80
|
[
"MIT"
] | 1
|
2020-09-16T14:58:24.000Z
|
2020-09-16T14:58:24.000Z
|
"""
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import torch.nn as nn
import math
from train.metrics import accuracy_MNIST_CIFAR as accuracy
def train_epoch(model, optimizer, device, data_loader, epoch=0):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
batch_e = batch_graphs.edata['feat'].to(device)
batch_snorm_e = batch_snorm_e.to(device)
batch_labels = batch_labels.to(device)
batch_snorm_n = batch_snorm_n.to(device) # num x 1
optimizer.zero_grad()
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
loss = model.loss(batch_scores, batch_labels)
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
epoch_train_acc += accuracy(batch_scores, batch_labels)
nb_data += batch_labels.size(0)
epoch_loss /= (iter + 1)
epoch_train_acc /= nb_data
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network(model, device, data_loader, epoch=0):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_snorm_e = batch_snorm_e.to(device)
batch_labels = batch_labels.to(device)
batch_snorm_n = batch_snorm_n.to(device)
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
loss = model.loss(batch_scores, batch_labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(batch_scores, batch_labels)
nb_data += batch_labels.size(0)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= nb_data
return epoch_test_loss, epoch_test_acc
| 38.775862
| 103
| 0.663406
|
import torch
import torch.nn as nn
import math
from train.metrics import accuracy_MNIST_CIFAR as accuracy
def train_epoch(model, optimizer, device, data_loader, epoch=0):
model.train()
epoch_loss = 0
epoch_train_acc = 0
nb_data = 0
gpu_mem = 0
for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_snorm_e = batch_snorm_e.to(device)
batch_labels = batch_labels.to(device)
batch_snorm_n = batch_snorm_n.to(device)
optimizer.zero_grad()
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
loss = model.loss(batch_scores, batch_labels)
loss.backward()
optimizer.step()
epoch_loss += loss.detach().item()
epoch_train_acc += accuracy(batch_scores, batch_labels)
nb_data += batch_labels.size(0)
epoch_loss /= (iter + 1)
epoch_train_acc /= nb_data
return epoch_loss, epoch_train_acc, optimizer
def evaluate_network(model, device, data_loader, epoch=0):
model.eval()
epoch_test_loss = 0
epoch_test_acc = 0
nb_data = 0
with torch.no_grad():
for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_e = batch_graphs.edata['feat'].to(device)
batch_snorm_e = batch_snorm_e.to(device)
batch_labels = batch_labels.to(device)
batch_snorm_n = batch_snorm_n.to(device)
batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
loss = model.loss(batch_scores, batch_labels)
epoch_test_loss += loss.detach().item()
epoch_test_acc += accuracy(batch_scores, batch_labels)
nb_data += batch_labels.size(0)
epoch_test_loss /= (iter + 1)
epoch_test_acc /= nb_data
return epoch_test_loss, epoch_test_acc
| true
| true
|
7906c9ef860a7c6851f9223f436614bc5f4fcb11
| 12,739
|
py
|
Python
|
raynet/models.py
|
paschalidoud/raynet
|
bf468dadddaf30da9cf5b1ecdfbcf4f161476242
|
[
"MIT"
] | 76
|
2018-04-08T04:33:26.000Z
|
2021-09-24T15:05:45.000Z
|
raynet/models.py
|
paschalidoud/raynet
|
bf468dadddaf30da9cf5b1ecdfbcf4f161476242
|
[
"MIT"
] | 8
|
2018-08-24T16:56:19.000Z
|
2021-04-11T08:41:31.000Z
|
raynet/models.py
|
paschalidoud/raynet
|
bf468dadddaf30da9cf5b1ecdfbcf4f161476242
|
[
"MIT"
] | 18
|
2018-06-28T13:23:22.000Z
|
2021-03-29T03:17:39.000Z
|
import os
import h5py
import numpy as np
from keras import backend as K
from keras.layers import Activation, BatchNormalization, Conv2D, Dense, Dot, \
Dropout, Flatten, Input, MaxPooling2D, GlobalAveragePooling2D
from keras import regularizers
from keras.layers import Average as KerasAverage
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
from keras.engine.topology import Layer
from .layers import LayerNormalization, CustomSoftmax
from .tf_implementations.loss_functions import loss_factory
class TotalReshape(Layer):
def __init__(self, target_shape, **kwargs):
self.target_shape = target_shape
super(TotalReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return tuple(
x if x != -1 else None
for x in self.target_shape
)
def call(self, x):
return K.reshape(x, self.target_shape)
class BaseReducer(Layer):
def __init__(self, **kwargs):
super(BaseReducer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Average(BaseReducer):
def call(self, x):
return K.mean(x, axis=-1)
class Max(BaseReducer):
def call(self, x):
return K.max(x, axis=-1)
class TopKAverage(BaseReducer):
def __init__(self, k, **kwargs):
self.k = k
super(TopKAverage, self).__init__(**kwargs)
def call(self, x):
if K.backend() == "tensorflow":
tf = K.tf
x, _ = tf.nn.top_k(x, self.k, sorted=False)
return K.mean(x, axis=-1)
else:
raise NotImplementedError("TopKAverage is not implemented for "
" %s backend" % (K.backend(),))
def reducer_factory(reducer, k=3):
# Set the type of the reducer to be used
if reducer == "max":
return Max()
elif reducer == "average":
return Average()
elif reducer == "topK":
return TopKAverage(k)
def mae(y_true, y_pred):
""" Implementation of Mean average error
"""
return K.mean(K.abs(y_true - y_pred))
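# Mean depth error: average absolute distance between the predicted and true
# depth-plane indices (argmax positions).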
def mde(y_true, y_pred):
return K.mean(K.cast(
K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1)),
K.floatx()
))
def create_simple_cnn(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization()
])
def create_simple_cnn_ln(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization()
])
def create_dilated_cnn_receptive_field_25(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_dilated_cnn_receptive_field_25_with_tanh(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_hartmann_cnn(input_shape, kernel_regularizer=None):
return Sequential([
Conv2D(filters=32, kernel_size=5, input_shape=input_shape),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(filters=64, kernel_size=5),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2))
])
def cnn_factory(name):
cnn_factories = {
"simple_cnn": create_simple_cnn,
"simple_cnn_ln": create_simple_cnn_ln,
"dilated_cnn_receptive_field_25":
create_dilated_cnn_receptive_field_25,
"dilated_cnn_receptive_field_25_with_tanh":
create_dilated_cnn_receptive_field_25_with_tanh,
"hartmann_cnn": create_hartmann_cnn
}
return cnn_factories[name]
def optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):
# Set the type of optimizer to be used
if optimizer == "Adam":
return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)
elif optimizer == "SGD":
return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm,
clipvalue=clipvalue)
def kernel_regularizer_factory(regularizer_factor):
if regularizer_factor == 0.0:
return None
else:
return regularizers.l2(regularizer_factor)
def build_simple_cnn(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="mse",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
# TODO: Maybe change this to 3, because we finally need only the
# patch_shape?
assert len(input_shape) == 5
# Unpack the input shape to make the code more readable
D, N, W, H, C = input_shape
model = create_cnn(
input_shape=(None, None, C),
kernel_regularizer=weight_decay
)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss)
)
# If there is a weight file specified load the weights
if weight_file:
try:
f = h5py.File(weight_file, "r")
keys = [os.path.join(model.name, w.name)
for l in model.layers for w in l.weights]
weights = [f[os.path.join("model_weights", k)][:] for k in keys]
model.set_weights(weights)
except:
model.load_weights(weight_file, by_name=True)
return model
def build_simple_nn_for_training(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="emd",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
assert len(input_shape) == 5
# Unpack the input shape to make the code more readable
D, N, W, H, C = input_shape
# Create the two stream inputs
x1_in = Input(shape=input_shape)
x2_in = Input(shape=input_shape)
# Reshape them for input in the CNN
x1 = TotalReshape((-1, W, H, C))(x1_in)
x2 = TotalReshape((-1, W, H, C))(x2_in)
# Create the CNN and extract features from both streams
cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)
x1 = Flatten()(cnn(x1))
x2 = Flatten()(cnn(x2))
# Compute a kind of similarity between the features of the two streams
x = Dot(axes=-1, normalize=(merge_layer == "cosine-similarity"))([x1, x2])
# Reshape them back into their semantic shape (depth planes, patches, etc)
x = TotalReshape((-1, D, N))(x)
# Compute the final similarity scores for each depth plane
x = reducer_factory(reducer)(x)
# Compute the final output
y = Activation("softmax")(x)
model = Model(inputs=[x1_in, x2_in], outputs=y)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss),
metrics=["accuracy", mae, mde]
)
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def build_hartmann_network(
input_shape,
create_cnn=create_hartmann_cnn,
optimizer="SGD",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss=None,
reducer=None,
merge_layer=None,
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
assert len(input_shape) == 3
# Unpack the input shape to make the code more readable
H, W, C = input_shape
# Create the feature extracting CNN
cnn = create_hartmann_cnn(input_shape=(None, None, C))
# Create the similarity CNN
sim = Sequential([
Conv2D(
filters=2048,
kernel_size=5,
input_shape=K.int_shape(cnn.output)[1:]
),
Activation("relu"),
Conv2D(filters=2048, kernel_size=1),
Activation("relu"),
Conv2D(filters=2, kernel_size=1),
Activation("softmax")
])
# Create the joint model for training
x_in = [Input(shape=input_shape) for i in range(5)]
x = [cnn(xi) for xi in x_in]
x = KerasAverage()(x)
y = sim(x)
model = Model(inputs=x_in, outputs=y)
# Compile all the models
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss="categorical_crossentropy",
metrics=["accuracy"]
)
cnn.compile("sgd", "mse") # Just so that we can run predict()
sim.compile("sgd", "mse")
# Attach the cnn and sim to the model in case someone wants to use them
model.cnn = cnn
model.sim = sim
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def get_nn(name):
models = {
"simple_cnn": build_simple_cnn,
"simple_nn_for_training": build_simple_nn_for_training,
"hartmann": build_hartmann_network
}
return models[name]
| 26.539583
| 79
| 0.605464
|
import os
import h5py
import numpy as np
from keras import backend as K
from keras.layers import Activation, BatchNormalization, Conv2D, Dense, Dot, \
Dropout, Flatten, Input, MaxPooling2D, GlobalAveragePooling2D
from keras import regularizers
from keras.layers import Average as KerasAverage
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
from keras.engine.topology import Layer
from .layers import LayerNormalization, CustomSoftmax
from .tf_implementations.loss_functions import loss_factory
class TotalReshape(Layer):
def __init__(self, target_shape, **kwargs):
self.target_shape = target_shape
super(TotalReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return tuple(
x if x != -1 else None
for x in self.target_shape
)
def call(self, x):
return K.reshape(x, self.target_shape)
class BaseReducer(Layer):
def __init__(self, **kwargs):
super(BaseReducer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Average(BaseReducer):
def call(self, x):
return K.mean(x, axis=-1)
class Max(BaseReducer):
def call(self, x):
return K.max(x, axis=-1)
class TopKAverage(BaseReducer):
def __init__(self, k, **kwargs):
self.k = k
super(TopKAverage, self).__init__(**kwargs)
def call(self, x):
if K.backend() == "tensorflow":
tf = K.tf
x, _ = tf.nn.top_k(x, self.k, sorted=False)
return K.mean(x, axis=-1)
else:
raise NotImplementedError("TopKAverage is not implemented for "
" %s backend" % (K.backend(),))
def reducer_factory(reducer, k=3):
if reducer == "max":
return Max()
elif reducer == "average":
return Average()
elif reducer == "topK":
return TopKAverage(k)
def mae(y_true, y_pred):
return K.mean(K.abs(y_true - y_pred))
def mde(y_true, y_pred):
return K.mean(K.cast(
K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1)),
K.floatx()
))
def create_simple_cnn(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization()
])
def create_simple_cnn_ln(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization()
])
def create_dilated_cnn_receptive_field_25(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_dilated_cnn_receptive_field_25_with_tanh(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_hartmann_cnn(input_shape, kernel_regularizer=None):
return Sequential([
Conv2D(filters=32, kernel_size=5, input_shape=input_shape),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(filters=64, kernel_size=5),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2))
])
def cnn_factory(name):
cnn_factories = {
"simple_cnn": create_simple_cnn,
"simple_cnn_ln": create_simple_cnn_ln,
"dilated_cnn_receptive_field_25":
create_dilated_cnn_receptive_field_25,
"dilated_cnn_receptive_field_25_with_tanh":
create_dilated_cnn_receptive_field_25_with_tanh,
"hartmann_cnn": create_hartmann_cnn
}
return cnn_factories[name]
def optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):
if optimizer == "Adam":
return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)
elif optimizer == "SGD":
return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm,
clipvalue=clipvalue)
def kernel_regularizer_factory(regularizer_factor):
if regularizer_factor == 0.0:
return None
else:
return regularizers.l2(regularizer_factor)
def build_simple_cnn(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="mse",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
assert len(input_shape) == 5
D, N, W, H, C = input_shape
model = create_cnn(
input_shape=(None, None, C),
kernel_regularizer=weight_decay
)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss)
)
if weight_file:
try:
f = h5py.File(weight_file, "r")
keys = [os.path.join(model.name, w.name)
for l in model.layers for w in l.weights]
weights = [f[os.path.join("model_weights", k)][:] for k in keys]
model.set_weights(weights)
except:
model.load_weights(weight_file, by_name=True)
return model
def build_simple_nn_for_training(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="emd",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
assert len(input_shape) == 5
D, N, W, H, C = input_shape
x1_in = Input(shape=input_shape)
x2_in = Input(shape=input_shape)
x1 = TotalReshape((-1, W, H, C))(x1_in)
x2 = TotalReshape((-1, W, H, C))(x2_in)
cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)
x1 = Flatten()(cnn(x1))
x2 = Flatten()(cnn(x2))
x = Dot(axes=-1, normalize=(merge_layer == "cosine-similarity"))([x1, x2])
x = TotalReshape((-1, D, N))(x)
x = reducer_factory(reducer)(x)
y = Activation("softmax")(x)
model = Model(inputs=[x1_in, x2_in], outputs=y)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss),
metrics=["accuracy", mae, mde]
)
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def build_hartmann_network(
input_shape,
create_cnn=create_hartmann_cnn,
optimizer="SGD",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss=None,
reducer=None,
merge_layer=None,
weight_decay=None,
weight_file=None
):
assert len(input_shape) == 3
H, W, C = input_shape
cnn = create_hartmann_cnn(input_shape=(None, None, C))
sim = Sequential([
Conv2D(
filters=2048,
kernel_size=5,
input_shape=K.int_shape(cnn.output)[1:]
),
Activation("relu"),
Conv2D(filters=2048, kernel_size=1),
Activation("relu"),
Conv2D(filters=2, kernel_size=1),
Activation("softmax")
])
x_in = [Input(shape=input_shape) for i in range(5)]
x = [cnn(xi) for xi in x_in]
x = KerasAverage()(x)
y = sim(x)
model = Model(inputs=x_in, outputs=y)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss="categorical_crossentropy",
metrics=["accuracy"]
)
cnn.compile("sgd", "mse")
sim.compile("sgd", "mse")
model.cnn = cnn
model.sim = sim
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def get_nn(name):
models = {
"simple_cnn": build_simple_cnn,
"simple_nn_for_training": build_simple_nn_for_training,
"hartmann": build_hartmann_network
}
return models[name]
| true
| true
|
7906cb3e8d2fbd60b32822840f3b999b05b17ae5
| 840
|
py
|
Python
|
dbcreate.py
|
killerswan/dbfilter
|
eff41896be747a1839970fc8cac424e3963275e5
|
[
"0BSD"
] | null | null | null |
dbcreate.py
|
killerswan/dbfilter
|
eff41896be747a1839970fc8cac424e3963275e5
|
[
"0BSD"
] | null | null | null |
dbcreate.py
|
killerswan/dbfilter
|
eff41896be747a1839970fc8cac424e3963275e5
|
[
"0BSD"
] | null | null | null |
import sqlite3
from common import newpathrel
def create_sample_db():
# connect to or create a new database
conn = sqlite3.connect(newpathrel('sample.sqlite3'))
# get a cursor to it
cur = conn.cursor()
# create a table
cur.execute('''
create table monkeys
(name text, color text, favorite_food_to_steal text)
''')
# add data to the table
data = [
('kevin', 'blonde', 'pancakes'),
('natalie', 'brown', 'beef hoof'),
('natalie and kevin', 'brown', 'hamburgers at Hut\'s'),
('kevin c', 'purple', 'cherry-nut ice cream, with red cherries'),
]
cur.executemany('insert into monkeys values (?,?,?)', data)
# save these changes
conn.commit()
conn.close()
if __name__ == '__main__':
create_sample_db()
| 24.705882
| 77
| 0.580952
|
import sqlite3
from common import newpathrel
def create_sample_db():
conn = sqlite3.connect(newpathrel('sample.sqlite3'))
cur = conn.cursor()
cur.execute('''
create table monkeys
(name text, color text, favorite_food_to_steal text)
''')
data = [
('kevin', 'blonde', 'pancakes'),
('natalie', 'brown', 'beef hoof'),
('natalie and kevin', 'brown', 'hamburgers at Hut\'s'),
('kevin c', 'purple', 'cherry-nut ice cream, with red cherries'),
]
cur.executemany('insert into monkeys values (?,?,?)', data)
# save these changes
conn.commit()
conn.close()
if __name__ == '__main__':
create_sample_db()
| true
| true
|
7906cbfef710c408ea7b3bff35d4ca149cc72c47
| 21,537
|
py
|
Python
|
interboard.py
|
dequis/wakarimasen
|
18dce03158b52f6030d18c4c532e42daeb089adc
|
[
"WTFPL"
] | 17
|
2015-02-25T04:34:47.000Z
|
2022-01-17T07:17:05.000Z
|
interboard.py
|
weedy/wakarimasen
|
6984dd50de66bc8784a180a3cee685ce98c93aec
|
[
"WTFPL"
] | 4
|
2015-01-09T18:20:50.000Z
|
2016-07-16T06:11:26.000Z
|
interboard.py
|
weedy/wakarimasen
|
6984dd50de66bc8784a180a3cee685ce98c93aec
|
[
"WTFPL"
] | 3
|
2016-06-27T19:12:45.000Z
|
2021-01-03T06:08:19.000Z
|
'''Operations that affect multiple boards or the entire site,
e.g., transferring and merging threads.'''
import time
import re
import os
import sys
import traceback
from datetime import datetime
from calendar import timegm
from subprocess import Popen
import config
import strings
import board
import staff
import model
import util
import str_format
import misc
from template import Template
from util import WakaError, local
from sqlalchemy.sql import or_, and_, select
# Common Site Table!
def get_all_boards(check_board_name=''):
'''Get all the board names. All of them.'''
session = model.Session()
table = model.common
sql = select([table.c.board]).order_by(table.c.board)
query = session.execute(sql)
board_present = False
boards = []
for row in query:
boards.append({'board_entry' : row['board']})
if row['board'] == check_board_name:
board_present = True
if check_board_name and not board_present:
add_board_to_index(check_board_name)
boards.append({'board_entry' : check_board_name})
return boards
def add_board_to_index(board_name):
session = model.Session()
table = model.common
sql = table.insert().values(board=board_name, type='')
session.execute(sql)
def remove_board_from_index(board_name):
session = model.Session()
table = model.common
sql = table.delete().where(table.c.board == board_name)
session.execute(sql)
# Board looping (sandwich pattern).
def loop_thru_boards(board_obj_task, exc_msg, *args, **kwargs):
try:
boards = kwargs.pop('boards')
except KeyError:
boards = None
if not boards:
boards = [x['board_entry'] for x in get_all_boards()]
for board_str in boards:
try:
board_obj = board.Board(board_str)
local.environ['waka.board'] = board_obj
getattr(board_obj, board_obj_task)(*args, **kwargs)
board_obj.rebuild_cache()
except:
if exc_msg:
sys.stderr.write(exc_msg % board_str + '\n')
traceback.print_exc(file=sys.stderr)
# Global rebuilding
def global_cache_rebuild():
loop_thru_boards('rebuild_cache', 'Error in global cache rebuild in %s')
def global_cache_rebuild_proxy(task_data):
if task_data.user.account != staff.ADMIN:
raise WakaError(strings.INSUFFICIENTPRIVILEGES)
Popen([sys.executable, sys.argv[0], 'rebuild_global_cache'],
env=util.proxy_environ())
referer = local.environ['HTTP_REFERER']
task_data.contents.append(referer)
return util.make_http_forward(referer, config.ALTERNATE_REDIRECT)
# Global post management.
def process_global_delete_by_ip(ip, boards):
loop_thru_boards(
'delete_by_ip',
'Error in deleting posts from %s in %%s' % ip,
task_data = None,
ip = ip,
boards = boards
)
# Bans and Whitelists
def add_admin_entry(task_data, option, comment, ip='', mask='255.255.255.255',
sval1='', total='', expiration=0,
caller=''):
session = model.Session()
table = model.admin
ival1 = ival2 = 0
if not comment:
raise WakaError(strings.COMMENT_A_MUST)
if option in ('ipban', 'whitelist'):
if not ip:
raise WakaError('IP address required.')
if not mask:
mask = '255.255.255.255'
# Convert to decimal.
(ival1, ival2) = (misc.dot_to_dec(ip), misc.dot_to_dec(mask))
sql = table.select().where(table.c.type == option)
query = session.execute(sql)
for row in query:
try:
if int(row.ival1) & int(row.ival2) == ival1 & ival2:
raise WakaError('IP address and mask match ban #%d.' % \
(row.num))
except ValueError:
raise WakaError("Entry #%s on ban table is inconsistent. "
"This shouldn't happen." % row.num)
# Add info to task data.
content = ip + (' (' + mask + ')' if mask else '')
if total == 'yes':
add_htaccess_entry(ip)
content += ' (no browse)'
content += ' "' + comment + '"'
task_data.contents.append(content)
else:
if not sval1:
raise WakaError(strings.STRINGFIELDMISSING)
sql = table.select().where(and_(table.c.sval1 == sval1,
table.c.type == option))
row = session.execute(sql).fetchone()
if row:
raise WakaError('Duplicate String in ban #%d.' % (row.num))
        # Add info to task data.
task_data.contents.append(sval1)
comment = str_format.clean_string(\
str_format.decode_string(comment, config.CHARSET))
expiration = int(expiration) if expiration else 0
if expiration:
expiration = expiration + time.time()
sql = table.insert().values(type=option, comment=comment, ival1=int(ival1),
ival2=int(ival2), sval1=sval1, total=total,
expiration=expiration)
result = session.execute(sql)
task_data.admin_id = result.inserted_primary_key[0]
# Add specific action name to task data.
task_data.action = option
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='bans', board=board.name)
if caller == 'window':
return Template('edit_successful')
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
def remove_admin_entry(task_data, num, override_log=False, no_redirect=False):
session = model.Session()
table = model.admin
sql = table.select().where(table.c.num == num)
row = session.execute(sql).fetchone()
if not row:
raise WakaError('Entry not found. Deleted?')
ival1 = row['ival1']
ip = misc.dec_to_dot(ival1) if ival1 else ''
string_val = row['sval1']
if row['total']:
remove_htaccess_entry(ip)
sql = table.delete().where(table.c.num == num)
session.execute(sql)
task_data.action = row['type'] + '_remove'
if string_val:
task_data.contents.append(row['sval1'])
else:
task_data.contents.append(ip + ' (' + misc.dec_to_dot(row['ival2']) \
+ ')')
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='bans', board=board.name)
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
def remove_old_bans():
session = model.Session()
table = model.admin
sql = select([table.c.ival1, table.c.total],
and_(table.c.expiration <= time.time(),
table.c.expiration != 0))
query = session.execute(sql)
for row in query:
sql = table.delete().where(table.c.ival1 == row['ival1'])
session.execute(sql)
if row['total']:
ip = misc.dec_to_dot(row['ival1'])
remove_htaccess_entry(ip)
def remove_old_backups():
session = model.Session()
table = model.backup
sql = table.select().where(table.c.timestampofarchival.op('+')\
(config.POST_BACKUP_EXPIRE) <= time.time())
query = session.execute(sql)
for row in query:
board_obj = board.Board(row['board_name'])
backup_path = os.path.join(board_obj.path,
board_obj.options['ARCHIVE_DIR'],
board_obj.options['BACKUP_DIR'], '')
if row.image:
# Delete backup image; then, mark post for deletion.
filename = os.path.join(backup_path, os.path.basename(row.image))
if os.path.exists(filename):
os.unlink(filename)
if row.thumbnail \
and re.match(board_obj.options['THUMB_DIR'], row.thumbnail):
filename = os.path.join(backup_path,
os.path.basename(row.thumbnail))
if os.path.exists(filename):
os.unlink(filename)
# Perform SQL DELETE
sql = table.delete().where(table.c.timestampofarchival.op('+')\
(config.POST_BACKUP_EXPIRE) <= time.time())
session.execute(sql)
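# Append RewriteCond/RewriteRule lines to .htaccess so requests from the banned IP
# are redirected to the ban report page.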
def add_htaccess_entry(ip):
htaccess = os.path.join(local.environ['DOCUMENT_ROOT'],
config.HTACCESS_PATH, '.htaccess')
with util.FileLock(htaccess):
with open(htaccess, 'r') as f:
ban_entries_found = False
line = f.readline()
while line:
if line.count('RewriteEngine On'):
ban_entries_found = True
break
line = f.readline()
with open(htaccess, 'a') as f:
if not ban_entries_found:
f.write("\n"+'# Bans added by Wakarimasen'+"\n")
f.write("\n"+'RewriteEngine On'+"\n")
ip = ip.replace('.', r'\.')
f.write('RewriteCond %{REMOTE_ADDR} ^'+ip+'$'+"\n")
f.write('RewriteRule !(\+pl|\+js$|\+css$|\+png'\
'|ban_images) '+local.environ['SCRIPT_NAME']+'?'\
'task=banreport&board='\
+local.environ['waka.board'].name+"\n")
def remove_htaccess_entry(ip):
ip = ip.replace('.', r'\.')
htaccess = os.path.join(local.environ['DOCUMENT_ROOT'],
config.HTACCESS_PATH, '.htaccess')
with util.FileLock(htaccess):
lines = []
with open(htaccess, 'r') as f:
line = f.readline()
while line:
if not line.count('RewriteCond %{REMOTE_ADDR} ^' + ip + '$'):
lines.append(line)
else:
# Do not write, and skip the next line.
line = f.readline()
if line:
line = f.readline()
with open(htaccess, 'w') as f:
f.write(''.join(lines))
def ban_check(numip, name, subject, comment):
'''This function raises an exception if the IP address is banned, or
the post contains a forbidden (non-spam) string. It otherwise returns
nothing.'''
session = model.Session()
table = model.admin
# IP Banned?
sql = table.select().where(and_(table.c.type == 'ipban',
table.c.ival1.op('&')(table.c.ival2) \
== table.c.ival2.op('&')(numip)))
ip_row = session.execute(sql).fetchone()
if ip_row:
raise WakaError('Address %s banned. Reason: %s' % \
(misc.dec_to_dot(numip), ip_row.comment))
# To determine possible string bans, first normalize input to lowercase.
comment = comment.lower()
subject = subject.lower()
name = name.lower()
sql = select([table.c.sval1], table.c.type == 'wordban')
query = session.execute(sql)
for row in query:
bad_string = row.sval1.lower()
if comment.count(bad_string) or subject.count(bad_string) or \
name.count(bad_string):
raise WakaError(strings.STRREF)
def mark_resolved(task_data, delete, posts):
referer = local.environ['HTTP_REFERER']
user = task_data.user
errors = []
board_obj = None
old_board_obj = local.environ['waka.board']
for (board_name, posts) in posts.iteritems():
# Access rights enforcement.
if user.account == staff.MODERATOR and board_name not in user.reign:
errors.append({'error' : '/%s/*: Sorry, you lack access rights.'\
% (board_name)})
continue
for post in posts:
session = model.Session()
table = model.report
sql = table.select().where(and_(table.c.postnum == post,
table.c.board == board_name))
row = session.execute(sql).fetchone()
if not row:
errors.append({'error' : '%s,%d: Report not found.'\
% (board_name, int(post))})
continue
sql = table.delete().where(and_(table.c.postnum == post,
table.c.board == board_name))
session.execute(sql)
# Log the resolved post.
task_data.contents.append('/'.join(['', board_name, post]))
if delete:
try:
board_obj = board.Board(board_name)
local.environ['waka.board'] = board_obj
except WakaError:
errors.append({'error' : '%s,*: Error loading board.'\
% (board_name)})
continue
try:
board_obj.delete_stuff(posts, '', False, False,
admindelete=True,
admin_data=task_data)
except WakaError:
errors.append({'error' : '%s,%d: Post already deleted.'\
% (board_name, int(post))})
local.environ['waka.board'] = old_board_obj
# TODO: This probably should be refactored into StaffInterface.
return Template('report_resolved', errors=errors,
error_occurred=len(errors)>0,
admin=user.login_data.cookie,
username=user.username,
type=user.account,
boards_select=user.reign,
referer=referer)
def edit_admin_entry(task_data, num, comment='', ival1=None,
ival2='255.255.255.255', sval1='', total=False,
sec=None, min=None, hour=None, day=None, month=None,
year=None, noexpire=False):
session = model.Session()
table = model.admin
sql = table.select().where(table.c.num == num)
row = session.execute(sql).fetchone()
if not row:
raise WakaError('Entry was not created or was removed.')
task_data.action = row.type + '_edit'
if row.type in ('ipban', 'whitelist'):
if not noexpire:
try:
expiration = datetime(int(year), int(month), int(day),
int(hour), int(min), int(sec))
except:
raise WakaError('Invalid date.')
expiration = timegm(expiration.utctimetuple())
else:
expiration = 0
ival1 = misc.dot_to_dec(ival1)
ival2 = misc.dot_to_dec(ival2)
task_data.contents.append(ival1 + ' (' + ival2 + ')')
else:
expiration = 0
task_data.contents.append(sval1)
sql = table.update().where(table.c.num == num)\
.values(comment=comment, ival1=ival1, ival2=ival2, sval1=sval1,
total=total, expiration=expiration)
row = session.execute(sql)
return Template('edit_successful')
def delete_by_ip(task_data, ip, mask='255.255.255.255', caller=''):
task_data.contents.append(ip)
user = task_data.user
if user.account == staff.MODERATOR:
reign = user.reign
else:
reign = [x['board_entry'] for x in get_all_boards()]
Popen([sys.executable, sys.argv[0], 'delete_by_ip', ip, ','.join(reign)],
env=util.proxy_environ())
board_name = local.environ['waka.board'].name
redir = misc.make_script_url(task='mpanel', board=board_name)
if caller != 'internal':
return util.make_http_forward(redir, config.ALTERNATE_REDIRECT)
def trim_reported_posts(date=0):
mintime = 0
if date:
mintime = time.time() - date
elif config.REPORT_RETENTION:
mintime = time.time() - config.REPORT_RETENTION
if mintime > 0:
session = model.Session()
table = model.report
sql = table.delete().where(table.c.timestamp <= mintime)
session.execute(sql)
def trim_activity():
mintime = time.time() - config.STAFF_LOG_RETENTION
session = model.Session()
table = model.activity
sql = table.delete().where(table.c.timestamp <= mintime)
session.execute(sql)
def update_spam_file(task_data, spam):
if task_data.user.account == staff.MODERATOR:
raise WakaError(strings.INSUFFICIENTPRIVILEGES)
# Dump all contents to first spam file.
with open(config.SPAM_FILES[0], 'w') as f:
f.write(spam)
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='spam', board=board.name)
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
# Thread Transfer
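# Copy a thread's posts, images and thumbnails from the source board's tables and
# directories to the destination board, then delete the original thread.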
def move_thread(task_data, parent, src_brd_obj, dest_brd_obj):
if not parent:
raise WakaError('No thread specified.')
if src_brd_obj.name == dest_brd_obj.name:
raise WakaError('Source and destination boards match.')
# Check administrative access rights to both boards.
user = task_data.user
user.check_access(src_brd_obj.name)
user.check_access(dest_brd_obj.name)
session = model.Session()
src_table = src_brd_obj.table
dest_table = dest_brd_obj.table
sql = select([src_table.c.parent], src_table.c.num == parent)
row = session.execute(sql).fetchone()
if not row:
raise WakaError('Thread not found.')
elif row[0]:
# Automatically correct if reply instead of thread was given.
parent = row[0]
sql = src_table.select().where(or_(src_table.c.num == parent,
src_table.c.parent == parent))\
.order_by(src_table.c.num.asc())
thread = [dict(x.items()) for x in session.execute(sql).fetchall()]
# Indicate OP post number after insertion.
new_parent = 0
# List of images/thumbs to move around.
image_move = []
thumb_move = []
lasthit = time.time()
# DB operations
for post in thread:
# Grab post contents as dictionary of updates. Remove primary key.
del post['num']
post['lasthit'] = lasthit
image = post['image']
thumbnail = post['thumbnail']
if image:
image_move.append(image)
if re.match(src_brd_obj.options['THUMB_DIR'], thumbnail):
thumb_move.append(thumbnail)
# Update post reference links.
if new_parent:
post['parent'] = new_parent
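            # Rewrite links in the post body that pointed at the thread page on
            # the source board so they point at the new thread on the
            # destination board.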
            new_comment = re.sub(r'a href="(.*?)'
                                 + os.path.join(src_brd_obj.path,
                                                src_brd_obj.options['RES_DIR'],
                                                '%d%s' % (int(parent), config.PAGE_EXT)),
                                 r'a href="\1' + os.path.join(
                                     dest_brd_obj.path,
                                     dest_brd_obj.options['RES_DIR'],
                                     '%d%s' % (int(new_parent), config.PAGE_EXT)),
                                 post['comment'])
post['comment'] = new_comment
sql = dest_table.insert().values(**post)
result = session.execute(sql)
if not new_parent:
new_parent = result.inserted_primary_key[0]
    # Nested helper function for moving files in bulk.
def rename_files(filename_list, dir_type):
for filename in filename_list:
src_filename = os.path.join(src_brd_obj.path, filename)
dest_filename = re.sub('^/?' + src_brd_obj.options[dir_type],
dest_brd_obj.options[dir_type],
filename)
dest_filename = os.path.join(dest_brd_obj.path, dest_filename)
os.rename(src_filename, dest_filename)
# File transfer operations.
rename_files(image_move, 'IMG_DIR')
rename_files(thumb_move, 'THUMB_DIR')
dest_brd_obj.build_cache()
dest_brd_obj.build_thread_cache(new_parent)
src_brd_obj.delete_stuff([parent], '', False, False, caller='internal')
forward_url = misc.make_script_url(task='mpanel',
board=dest_brd_obj.name, page=('t%s' % new_parent))
# Log.
task_data.contents.append('/%s/%d to /%s/%d' \
% (src_brd_obj.name, int(parent),
dest_brd_obj.name, int(new_parent)))
return util.make_http_forward(forward_url)
# proxy
def add_proxy_entry(task_data, type, ip, timestamp):
session = model.Session()
table = model.proxy
if not misc.validate_ip(ip):
raise WakaError(strings.BADIP)
age = config.PROXY_WHITE_AGE if type == 'white' else config.PROXY_BLACK_AGE
timestamp = int(timestamp or '0') - age + time.time()
date = misc.make_date(time.time(), style=config.DATE_STYLE)
query = table.delete().where(table.c.ip == ip)
session.execute(query)
query = table.insert().values(
type=type,
ip=ip,
timestamp=timestamp,
date=date
)
session.execute(query)
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='proxy', board=board.name)
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
def remove_proxy_entry(task_data, num):
session = model.Session()
table = model.proxy
query = table.delete().where(table.c.num == num)
session.execute(query)
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='proxy', board=board.name)
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
| 33.916535
| 79
| 0.581418
|
import time
import re
import os
import sys
import traceback
from datetime import datetime
from calendar import timegm
from subprocess import Popen
import config
import strings
import board
import staff
import model
import util
import str_format
import misc
from template import Template
from util import WakaError, local
from sqlalchemy.sql import or_, and_, select
def get_all_boards(check_board_name=''):
session = model.Session()
table = model.common
sql = select([table.c.board]).order_by(table.c.board)
query = session.execute(sql)
board_present = False
boards = []
for row in query:
boards.append({'board_entry' : row['board']})
if row['board'] == check_board_name:
board_present = True
if check_board_name and not board_present:
add_board_to_index(check_board_name)
boards.append({'board_entry' : check_board_name})
return boards
def add_board_to_index(board_name):
session = model.Session()
table = model.common
sql = table.insert().values(board=board_name, type='')
session.execute(sql)
def remove_board_from_index(board_name):
session = model.Session()
table = model.common
sql = table.delete().where(table.c.board == board_name)
session.execute(sql)
def loop_thru_boards(board_obj_task, exc_msg, *args, **kwargs):
try:
boards = kwargs.pop('boards')
except KeyError:
boards = None
if not boards:
boards = [x['board_entry'] for x in get_all_boards()]
for board_str in boards:
try:
board_obj = board.Board(board_str)
local.environ['waka.board'] = board_obj
getattr(board_obj, board_obj_task)(*args, **kwargs)
board_obj.rebuild_cache()
except:
if exc_msg:
sys.stderr.write(exc_msg % board_str + '\n')
traceback.print_exc(file=sys.stderr)
def global_cache_rebuild():
loop_thru_boards('rebuild_cache', 'Error in global cache rebuild in %s')
def global_cache_rebuild_proxy(task_data):
if task_data.user.account != staff.ADMIN:
raise WakaError(strings.INSUFFICIENTPRIVILEGES)
Popen([sys.executable, sys.argv[0], 'rebuild_global_cache'],
env=util.proxy_environ())
referer = local.environ['HTTP_REFERER']
task_data.contents.append(referer)
return util.make_http_forward(referer, config.ALTERNATE_REDIRECT)
def process_global_delete_by_ip(ip, boards):
loop_thru_boards(
'delete_by_ip',
'Error in deleting posts from %s in %%s' % ip,
task_data = None,
ip = ip,
boards = boards
)
def add_admin_entry(task_data, option, comment, ip='', mask='255.255.255.255',
sval1='', total='', expiration=0,
caller=''):
session = model.Session()
table = model.admin
ival1 = ival2 = 0
if not comment:
raise WakaError(strings.COMMENT_A_MUST)
if option in ('ipban', 'whitelist'):
if not ip:
raise WakaError('IP address required.')
if not mask:
mask = '255.255.255.255'
(ival1, ival2) = (misc.dot_to_dec(ip), misc.dot_to_dec(mask))
sql = table.select().where(table.c.type == option)
query = session.execute(sql)
for row in query:
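            # An entry is a duplicate when the stored IP masked by its stored
            # mask equals the new IP masked by the new mask.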
try:
if int(row.ival1) & int(row.ival2) == ival1 & ival2:
raise WakaError('IP address and mask match ban #%d.' % \
(row.num))
except ValueError:
raise WakaError("Entry #%s on ban table is inconsistent. "
"This shouldn't happen." % row.num)
# Add info to task data.
content = ip + (' (' + mask + ')' if mask else '')
if total == 'yes':
add_htaccess_entry(ip)
content += ' (no browse)'
content += ' "' + comment + '"'
task_data.contents.append(content)
else:
if not sval1:
raise WakaError(strings.STRINGFIELDMISSING)
sql = table.select().where(and_(table.c.sval1 == sval1,
table.c.type == option))
row = session.execute(sql).fetchone()
if row:
            raise WakaError('Duplicate String in ban table.')
        # Add info to task data.
task_data.contents.append(sval1)
comment = str_format.clean_string(\
str_format.decode_string(comment, config.CHARSET))
expiration = int(expiration) if expiration else 0
if expiration:
expiration = expiration + time.time()
sql = table.insert().values(type=option, comment=comment, ival1=int(ival1),
ival2=int(ival2), sval1=sval1, total=total,
expiration=expiration)
result = session.execute(sql)
task_data.admin_id = result.inserted_primary_key[0]
# Add specific action name to task data.
task_data.action = option
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='bans', board=board.name)
if caller == 'window':
return Template('edit_successful')
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
def remove_admin_entry(task_data, num, override_log=False, no_redirect=False):
session = model.Session()
table = model.admin
sql = table.select().where(table.c.num == num)
row = session.execute(sql).fetchone()
if not row:
raise WakaError('Entry not found. Deleted?')
ival1 = row['ival1']
ip = misc.dec_to_dot(ival1) if ival1 else ''
string_val = row['sval1']
if row['total']:
remove_htaccess_entry(ip)
sql = table.delete().where(table.c.num == num)
session.execute(sql)
task_data.action = row['type'] + '_remove'
if string_val:
task_data.contents.append(row['sval1'])
else:
task_data.contents.append(ip + ' (' + misc.dec_to_dot(row['ival2']) \
+ ')')
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='bans', board=board.name)
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
def remove_old_bans():
session = model.Session()
table = model.admin
sql = select([table.c.ival1, table.c.total],
and_(table.c.expiration <= time.time(),
table.c.expiration != 0))
query = session.execute(sql)
for row in query:
sql = table.delete().where(table.c.ival1 == row['ival1'])
session.execute(sql)
if row['total']:
ip = misc.dec_to_dot(row['ival1'])
remove_htaccess_entry(ip)
def remove_old_backups():
session = model.Session()
table = model.backup
sql = table.select().where(table.c.timestampofarchival.op('+')\
(config.POST_BACKUP_EXPIRE) <= time.time())
query = session.execute(sql)
for row in query:
board_obj = board.Board(row['board_name'])
backup_path = os.path.join(board_obj.path,
board_obj.options['ARCHIVE_DIR'],
board_obj.options['BACKUP_DIR'], '')
if row.image:
# Delete backup image; then, mark post for deletion.
filename = os.path.join(backup_path, os.path.basename(row.image))
if os.path.exists(filename):
os.unlink(filename)
if row.thumbnail \
and re.match(board_obj.options['THUMB_DIR'], row.thumbnail):
filename = os.path.join(backup_path,
os.path.basename(row.thumbnail))
if os.path.exists(filename):
os.unlink(filename)
# Perform SQL DELETE
sql = table.delete().where(table.c.timestampofarchival.op('+')\
(config.POST_BACKUP_EXPIRE) <= time.time())
session.execute(sql)
def add_htaccess_entry(ip):
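    # Appends mod_rewrite rules to .htaccess so that requests from the banned
    # IP (except static assets) are redirected to the banreport task.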
htaccess = os.path.join(local.environ['DOCUMENT_ROOT'],
config.HTACCESS_PATH, '.htaccess')
with util.FileLock(htaccess):
with open(htaccess, 'r') as f:
ban_entries_found = False
line = f.readline()
while line:
if line.count('RewriteEngine On'):
ban_entries_found = True
break
line = f.readline()
with open(htaccess, 'a') as f:
if not ban_entries_found:
                f.write("\n"+'# Bans added by wakarimasen'+"\n")  # exact marker text assumed
f.write("\n"+'RewriteEngine On'+"\n")
ip = ip.replace('.', r'\.')
f.write('RewriteCond %{REMOTE_ADDR} ^'+ip+'$'+"\n")
f.write('RewriteRule !(\+pl|\+js$|\+css$|\+png'\
'|ban_images) '+local.environ['SCRIPT_NAME']+'?'\
'task=banreport&board='\
+local.environ['waka.board'].name+"\n")
def remove_htaccess_entry(ip):
ip = ip.replace('.', r'\.')
htaccess = os.path.join(local.environ['DOCUMENT_ROOT'],
config.HTACCESS_PATH, '.htaccess')
with util.FileLock(htaccess):
lines = []
with open(htaccess, 'r') as f:
line = f.readline()
while line:
if not line.count('RewriteCond %{REMOTE_ADDR} ^' + ip + '$'):
lines.append(line)
else:
# Do not write, and skip the next line.
line = f.readline()
if line:
line = f.readline()
with open(htaccess, 'w') as f:
f.write(''.join(lines))
def ban_check(numip, name, subject, comment):
session = model.Session()
table = model.admin
# IP Banned?
sql = table.select().where(and_(table.c.type == 'ipban',
table.c.ival1.op('&')(table.c.ival2) \
== table.c.ival2.op('&')(numip)))
ip_row = session.execute(sql).fetchone()
if ip_row:
raise WakaError('Address %s banned. Reason: %s' % \
(misc.dec_to_dot(numip), ip_row.comment))
# To determine possible string bans, first normalize input to lowercase.
comment = comment.lower()
subject = subject.lower()
name = name.lower()
sql = select([table.c.sval1], table.c.type == 'wordban')
query = session.execute(sql)
for row in query:
bad_string = row.sval1.lower()
if comment.count(bad_string) or subject.count(bad_string) or \
name.count(bad_string):
raise WakaError(strings.STRREF)
def mark_resolved(task_data, delete, posts):
referer = local.environ['HTTP_REFERER']
user = task_data.user
errors = []
board_obj = None
old_board_obj = local.environ['waka.board']
for (board_name, posts) in posts.iteritems():
# Access rights enforcement.
if user.account == staff.MODERATOR and board_name not in user.reign:
errors.append({'error' : '/%s/*: Sorry, you lack access rights.'\
% (board_name)})
continue
for post in posts:
session = model.Session()
table = model.report
sql = table.select().where(and_(table.c.postnum == post,
table.c.board == board_name))
row = session.execute(sql).fetchone()
if not row:
errors.append({'error' : '%s,%d: Report not found.'\
% (board_name, int(post))})
continue
sql = table.delete().where(and_(table.c.postnum == post,
table.c.board == board_name))
session.execute(sql)
# Log the resolved post.
task_data.contents.append('/'.join(['', board_name, post]))
if delete:
try:
board_obj = board.Board(board_name)
local.environ['waka.board'] = board_obj
except WakaError:
errors.append({'error' : '%s,*: Error loading board.'\
% (board_name)})
continue
try:
board_obj.delete_stuff(posts, '', False, False,
admindelete=True,
admin_data=task_data)
except WakaError:
errors.append({'error' : '%s,%d: Post already deleted.'\
% (board_name, int(post))})
local.environ['waka.board'] = old_board_obj
# TODO: This probably should be refactored into StaffInterface.
return Template('report_resolved', errors=errors,
error_occurred=len(errors)>0,
admin=user.login_data.cookie,
username=user.username,
type=user.account,
boards_select=user.reign,
referer=referer)
def edit_admin_entry(task_data, num, comment='', ival1=None,
ival2='255.255.255.255', sval1='', total=False,
sec=None, min=None, hour=None, day=None, month=None,
year=None, noexpire=False):
session = model.Session()
table = model.admin
sql = table.select().where(table.c.num == num)
row = session.execute(sql).fetchone()
if not row:
raise WakaError('Entry was not created or was removed.')
task_data.action = row.type + '_edit'
if row.type in ('ipban', 'whitelist'):
if not noexpire:
try:
expiration = datetime(int(year), int(month), int(day),
int(hour), int(min), int(sec))
except:
raise WakaError('Invalid date.')
expiration = timegm(expiration.utctimetuple())
else:
expiration = 0
ival1 = misc.dot_to_dec(ival1)
ival2 = misc.dot_to_dec(ival2)
task_data.contents.append(ival1 + ' (' + ival2 + ')')
else:
expiration = 0
task_data.contents.append(sval1)
sql = table.update().where(table.c.num == num)\
.values(comment=comment, ival1=ival1, ival2=ival2, sval1=sval1,
total=total, expiration=expiration)
row = session.execute(sql)
return Template('edit_successful')
def delete_by_ip(task_data, ip, mask='255.255.255.255', caller=''):
task_data.contents.append(ip)
user = task_data.user
if user.account == staff.MODERATOR:
reign = user.reign
else:
reign = [x['board_entry'] for x in get_all_boards()]
Popen([sys.executable, sys.argv[0], 'delete_by_ip', ip, ','.join(reign)],
env=util.proxy_environ())
board_name = local.environ['waka.board'].name
redir = misc.make_script_url(task='mpanel', board=board_name)
if caller != 'internal':
return util.make_http_forward(redir, config.ALTERNATE_REDIRECT)
def trim_reported_posts(date=0):
mintime = 0
if date:
mintime = time.time() - date
elif config.REPORT_RETENTION:
mintime = time.time() - config.REPORT_RETENTION
if mintime > 0:
session = model.Session()
table = model.report
sql = table.delete().where(table.c.timestamp <= mintime)
session.execute(sql)
def trim_activity():
mintime = time.time() - config.STAFF_LOG_RETENTION
session = model.Session()
table = model.activity
sql = table.delete().where(table.c.timestamp <= mintime)
session.execute(sql)
def update_spam_file(task_data, spam):
if task_data.user.account == staff.MODERATOR:
raise WakaError(strings.INSUFFICIENTPRIVILEGES)
# Dump all contents to first spam file.
with open(config.SPAM_FILES[0], 'w') as f:
f.write(spam)
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='spam', board=board.name)
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
# Thread Transfer
def move_thread(task_data, parent, src_brd_obj, dest_brd_obj):
if not parent:
raise WakaError('No thread specified.')
if src_brd_obj.name == dest_brd_obj.name:
raise WakaError('Source and destination boards match.')
# Check administrative access rights to both boards.
user = task_data.user
user.check_access(src_brd_obj.name)
user.check_access(dest_brd_obj.name)
session = model.Session()
src_table = src_brd_obj.table
dest_table = dest_brd_obj.table
sql = select([src_table.c.parent], src_table.c.num == parent)
row = session.execute(sql).fetchone()
if not row:
raise WakaError('Thread not found.')
elif row[0]:
# Automatically correct if reply instead of thread was given.
parent = row[0]
sql = src_table.select().where(or_(src_table.c.num == parent,
src_table.c.parent == parent))\
.order_by(src_table.c.num.asc())
thread = [dict(x.items()) for x in session.execute(sql).fetchall()]
# Indicate OP post number after insertion.
new_parent = 0
# List of images/thumbs to move around.
image_move = []
thumb_move = []
lasthit = time.time()
# DB operations
for post in thread:
# Grab post contents as dictionary of updates. Remove primary key.
del post['num']
post['lasthit'] = lasthit
image = post['image']
thumbnail = post['thumbnail']
if image:
image_move.append(image)
if re.match(src_brd_obj.options['THUMB_DIR'], thumbnail):
thumb_move.append(thumbnail)
# Update post reference links.
if new_parent:
post['parent'] = new_parent
            new_comment = re.sub(r'a href="(.*?)'
                                 + os.path.join(src_brd_obj.path,
                                                src_brd_obj.options['RES_DIR'],
                                                '%d%s' % (int(parent), config.PAGE_EXT)),
                                 r'a href="\1' + os.path.join(
                                     dest_brd_obj.path,
                                     dest_brd_obj.options['RES_DIR'],
                                     '%d%s' % (int(new_parent), config.PAGE_EXT)),
                                 post['comment'])
post['comment'] = new_comment
sql = dest_table.insert().values(**post)
result = session.execute(sql)
if not new_parent:
new_parent = result.inserted_primary_key[0]
    # Nested helper function for moving files in bulk.
def rename_files(filename_list, dir_type):
for filename in filename_list:
src_filename = os.path.join(src_brd_obj.path, filename)
dest_filename = re.sub('^/?' + src_brd_obj.options[dir_type],
dest_brd_obj.options[dir_type],
filename)
dest_filename = os.path.join(dest_brd_obj.path, dest_filename)
os.rename(src_filename, dest_filename)
# File transfer operations.
rename_files(image_move, 'IMG_DIR')
rename_files(thumb_move, 'THUMB_DIR')
dest_brd_obj.build_cache()
dest_brd_obj.build_thread_cache(new_parent)
src_brd_obj.delete_stuff([parent], '', False, False, caller='internal')
forward_url = misc.make_script_url(task='mpanel',
board=dest_brd_obj.name, page=('t%s' % new_parent))
# Log.
task_data.contents.append('/%s/%d to /%s/%d' \
% (src_brd_obj.name, int(parent),
dest_brd_obj.name, int(new_parent)))
return util.make_http_forward(forward_url)
# proxy
def add_proxy_entry(task_data, type, ip, timestamp):
session = model.Session()
table = model.proxy
if not misc.validate_ip(ip):
raise WakaError(strings.BADIP)
age = config.PROXY_WHITE_AGE if type == 'white' else config.PROXY_BLACK_AGE
timestamp = int(timestamp or '0') - age + time.time()
date = misc.make_date(time.time(), style=config.DATE_STYLE)
query = table.delete().where(table.c.ip == ip)
session.execute(query)
query = table.insert().values(
type=type,
ip=ip,
timestamp=timestamp,
date=date
)
session.execute(query)
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='proxy', board=board.name)
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
def remove_proxy_entry(task_data, num):
session = model.Session()
table = model.proxy
query = table.delete().where(table.c.num == num)
session.execute(query)
board = local.environ['waka.board']
forward_url = misc.make_script_url(task='proxy', board=board.name)
return util.make_http_forward(forward_url, config.ALTERNATE_REDIRECT)
| true
| true
|
7906cd293271edf1db810f3042870d676c4e0208
| 1,185
|
py
|
Python
|
userbot/plugins/indanime.py
|
aksr-aashish/FIREXUSERBOT
|
dff0b7bf028cb27779626ce523402346cc990402
|
[
"MIT"
] | null | null | null |
userbot/plugins/indanime.py
|
aksr-aashish/FIREXUSERBOT
|
dff0b7bf028cb27779626ce523402346cc990402
|
[
"MIT"
] | 1
|
2022-01-09T11:35:06.000Z
|
2022-01-09T11:35:06.000Z
|
userbot/plugins/indanime.py
|
aksr-aashish/FIREXUSERBOT
|
dff0b7bf028cb27779626ce523402346cc990402
|
[
"MIT"
] | null | null | null |
# made by @Eviral
from . import *
@bot.on(admin_cmd(pattern="indanime(.*)"))
async def xd(event):
await event.edit("wishing to all🇮🇳🇮🇳...")
event.pattern_match.group(1)
async for tele in borg.iter_dialogs():
if tele.is_group:
chat = tele.id
lol = 0
done = 0
try:
await bot.send_message(
chat,
'⣿⣿⣿⣿⣿⣍⠀⠉⠻⠟⠻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⡇⠀⠀⠀⠀⣰⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣿⠓⠀⠀⢒⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⡿⠃⠀⠀⠀⠀⠈⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠿⣿\n⣿⡿⠋⠋⠀⠀⠀⠀⠀⠀⠈⠙⠻⢿⢿⣿⣿⡿⣿⣿⡟⠋⠀⢀⣩\n⣿⣿⡄⠀⠀⠀⠀⠀⠁⡀⠀⠀⠀⠀⠈⠉⠛⢷⣭⠉⠁⠀⠀⣿⣿\n⣇⣀. INDIA🇮🇳INDIA🇮🇳⠆⠠..⠘⢷⣿⣿⣛⠐⣶⣿⣿\n⣿⣄⠀⣰⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠐⢀⣠⣿⣿⣿⣾⣿⣿⣿\n⣿⣿⣿⣿⠀⠀⠀⠀⡠⠀⠀⠀⠀⠀⢀⣠⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠄⠀⣤⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⣠⣤⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⠀⠀⠂⠀⠀⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣇⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣿⡆⠀⢀⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣿⣿⣦⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n\n[нαρργ ιи∂ρєи∂єиϲє ∂αγ🇮🇳](https://t.me/FirexSupport)',
)
done += 1
except:
lol += 1
await event.reply(
'happy Independence day 🇮🇳 from FIREX support\nthanks for using this Plugin.'
)
CmdHelp("indanime").add_command("indanime", None, "Wish u happy independence day").add()
| 38.225806
| 488
| 0.357806
|
from . import *
@bot.on(admin_cmd(pattern="indanime(.*)"))
async def xd(event):
await event.edit("wishing to all🇮🇳🇮🇳...")
event.pattern_match.group(1)
async for tele in borg.iter_dialogs():
if tele.is_group:
chat = tele.id
lol = 0
done = 0
try:
await bot.send_message(
chat,
'⣿⣿⣿⣿⣿⣍⠀⠉⠻⠟⠻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⡇⠀⠀⠀⠀⣰⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣿⠓⠀⠀⢒⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⡿⠃⠀⠀⠀⠀⠈⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠿⣿\n⣿⡿⠋⠋⠀⠀⠀⠀⠀⠀⠈⠙⠻⢿⢿⣿⣿⡿⣿⣿⡟⠋⠀⢀⣩\n⣿⣿⡄⠀⠀⠀⠀⠀⠁⡀⠀⠀⠀⠀⠈⠉⠛⢷⣭⠉⠁⠀⠀⣿⣿\n⣇⣀. INDIA🇮🇳INDIA🇮🇳⠆⠠..⠘⢷⣿⣿⣛⠐⣶⣿⣿\n⣿⣄⠀⣰⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠐⢀⣠⣿⣿⣿⣾⣿⣿⣿\n⣿⣿⣿⣿⠀⠀⠀⠀⡠⠀⠀⠀⠀⠀⢀⣠⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠄⠀⣤⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⡄⠀⠀⠀⠀⠀⣠⣤⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⠀⠀⠂⠀⠀⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣇⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣿⡆⠀⢀⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n⣿⣿⣿⣿⣿⣿⣿⣦⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\n\n[нαρργ ιи∂ρєи∂єиϲє ∂αγ🇮🇳](https://t.me/FirexSupport)',
)
done += 1
except:
lol += 1
await event.reply(
'happy Independence day 🇮🇳 from FIREX support\nthanks for using this Plugin.'
)
CmdHelp("indanime").add_command("indanime", None, "Wish u happy independence day").add()
| true
| true
|
7906cee89d26da64111f43d4fc429ec863c00f57
| 96
|
py
|
Python
|
data/__init__.py
|
tranandrew0421/Rin-Bot
|
df81c7c5ed41f3623eeabc0eb455c60672035163
|
[
"MIT"
] | null | null | null |
data/__init__.py
|
tranandrew0421/Rin-Bot
|
df81c7c5ed41f3623eeabc0eb455c60672035163
|
[
"MIT"
] | null | null | null |
data/__init__.py
|
tranandrew0421/Rin-Bot
|
df81c7c5ed41f3623eeabc0eb455c60672035163
|
[
"MIT"
] | null | null | null |
from pathlib import Path
data_path = Path(Path(__file__).parent)
__all__ = ['data_path']
| 16
| 40
| 0.708333
|
from pathlib import Path
data_path = Path(Path(__file__).parent)
__all__ = ['data_path']
| true
| true
|
7906cf461c762bc51e1bf645aa1ff7ce87ff52a9
| 57,035
|
py
|
Python
|
plenum/test/helper.py
|
SchwiftyRick/indy-plenum
|
d23b99423eb805971e50446d7e89ada892aa6811
|
[
"Apache-2.0"
] | 1
|
2021-04-03T07:45:01.000Z
|
2021-04-03T07:45:01.000Z
|
plenum/test/helper.py
|
SchwiftyRick/indy-plenum
|
d23b99423eb805971e50446d7e89ada892aa6811
|
[
"Apache-2.0"
] | 1
|
2021-07-14T17:10:04.000Z
|
2021-07-14T17:10:04.000Z
|
plenum/test/helper.py
|
SchwiftyRick/indy-plenum
|
d23b99423eb805971e50446d7e89ada892aa6811
|
[
"Apache-2.0"
] | 2
|
2021-02-19T15:36:50.000Z
|
2021-07-20T11:37:54.000Z
|
from datetime import datetime
import itertools
import os
import random
import string
from _signal import SIGINT
from contextlib import contextmanager
from functools import partial
from itertools import permutations, combinations
from shutil import copyfile
from sys import executable
from time import sleep, perf_counter
from typing import Tuple, Iterable, Dict, Optional, List, Any, Sequence, Union, Callable
import base58
import pytest
from indy.pool import set_protocol_version
from common.serializers.serialization import invalid_index_serializer
from crypto.bls.bls_factory import BlsFactoryCrypto
from plenum.common.event_bus import ExternalBus, InternalBus
from plenum.common.member.member import Member
from plenum.common.member.steward import Steward
from plenum.common.signer_did import DidSigner
from plenum.common.signer_simple import SimpleSigner
from plenum.common.timer import QueueTimer, TimerService
from plenum.config import Max3PCBatchWait
from psutil import Popen
import json
import asyncio
from indy.ledger import sign_and_submit_request, sign_request, submit_request, build_node_request, \
multi_sign_request
from indy.error import ErrorCode, IndyError
from ledger.genesis_txn.genesis_txn_file_util import genesis_txn_file
from plenum.common.constants import DOMAIN_LEDGER_ID, OP_FIELD_NAME, REPLY, REQNACK, REJECT, \
CURRENT_PROTOCOL_VERSION, STEWARD, VALIDATOR, TRUSTEE, DATA, BLS_KEY, BLS_KEY_PROOF
from plenum.common.exceptions import RequestNackedException, RequestRejectedException, CommonSdkIOException, \
PoolLedgerTimeoutException
from plenum.common.messages.node_messages import Reply, PrePrepare, Prepare, Commit
from plenum.common.txn_util import get_req_id, get_from, get_payload_data
from plenum.common.types import f, OPERATION
from plenum.common.util import getNoInstances, get_utc_epoch
from plenum.common.config_helper import PNodeConfigHelper
from plenum.common.request import Request
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.node import Node
from plenum.test import waits
from plenum.test.constants import BUY
from plenum.test.msgs import randomMsg
from plenum.test.spy_helpers import getLastClientReqReceivedForNode, getAllArgs, getAllReturnVals, \
getAllMsgReceivedForNode
from plenum.test.test_node import TestNode, TestReplica, \
getPrimaryReplica, getNonPrimaryReplicas
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventuallyAll, eventually
from stp_core.loop.looper import Looper
from stp_core.network.util import checkPortAvailable
logger = getlogger()
# noinspection PyUnresolvedReferences
def ordinal(n):
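    # "tsnrhtdd"[i::4] yields 'th', 'st', 'nd' or 'rd' for i = 0..3; the index
    # falls back to 0 ('th') for 11-13 and for last digits of 0 or >= 4.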
    return "%d%s" % (
        n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
def random_string(length: int) -> str:
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
def send_reqs_batches_and_get_suff_replies(
looper: Looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
num_reqs: int,
num_batches=1,
**kwargs):
# This method assumes that `num_reqs` <= num_batches*MaxbatchSize
if num_batches == 1:
return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, num_reqs)
else:
requests = []
for _ in range(num_batches - 1):
requests.extend(
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, num_reqs // num_batches))
rem = num_reqs % num_batches
if rem == 0:
rem = num_reqs // num_batches
requests.extend(
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, rem))
return requests
# noinspection PyIncorrectDocstring
def checkResponseCorrectnessFromNodes(receivedMsgs: Iterable, reqId: int,
fValue: int) -> bool:
"""
the client must get at least :math:`f+1` responses
"""
msgs = [(msg[f.RESULT.nm][f.REQ_ID.nm], msg[f.RESULT.nm][f.IDENTIFIER.nm])
for msg in getRepliesFromClientInbox(receivedMsgs, reqId)]
groupedMsgs = {}
for tpl in msgs:
groupedMsgs[tpl] = groupedMsgs.get(tpl, 0) + 1
assert max(groupedMsgs.values()) >= fValue + 1
def getRepliesFromClientInbox(inbox, reqId) -> list:
return list({_: msg for msg, _ in inbox if
msg[OP_FIELD_NAME] == REPLY and msg[f.RESULT.nm]
[f.REQ_ID.nm] == reqId}.values())
def checkLastClientReqForNode(node: TestNode, expectedRequest: Request):
recvRequest = getLastClientReqReceivedForNode(node)
assert recvRequest
assert expectedRequest.as_dict == recvRequest.as_dict
# noinspection PyIncorrectDocstring
def assertLength(collection: Iterable[Any], expectedLength: int):
assert len(
collection) == expectedLength, "Observed length was {} but " \
"expected length was {}". \
format(len(collection), expectedLength)
def assertEquality(observed: Any, expected: Any, details=None):
assert observed == expected, "Observed value was {} but expected value " \
"was {}, details: {}".format(observed, expected, details)
def randomOperation():
return {
"type": BUY,
"amount": random.randint(10, 100000)
}
def random_requests(count):
return [randomOperation() for _ in range(count)]
def random_request_objects(count, protocol_version):
req_dicts = random_requests(count)
return [Request(operation=op, protocolVersion=protocol_version) for op in req_dicts]
def buildCompletedTxnFromReply(request, reply: Reply) -> Dict:
txn = request.operation
txn.update(reply)
return txn
async def msgAll(nodes):
# test sending messages from every node to every other node
# TODO split send and check so that the messages can be sent concurrently
for p in permutations(nodes, 2):
await sendMessageAndCheckDelivery(p[0], p[1])
def sendMessage(sender: Node,
reciever: Node,
msg: Optional[Tuple] = None):
"""
Sends message from one node to another
    :param sender: sender
    :param reciever: recipient
:param msg: optional message - by default random one generated
:return:
"""
logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
msg = msg if msg else randomMsg()
rid = sender.nodestack.getRemote(reciever.name).uid
sender.nodestack.send(msg, rid)
async def sendMessageAndCheckDelivery(sender: Node,
reciever: Node,
msg: Optional[Tuple] = None,
method=None,
customTimeout=None):
"""
Sends message from one node to another and checks that it was delivered
:param sender: sender
    :param reciever: recipient
:param msg: optional message - by default random one generated
:param customTimeout:
:return:
"""
logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
msg = msg if msg else randomMsg()
rid = sender.nodestack.getRemote(reciever.name).uid
sender.nodestack.send(msg, rid)
timeout = customTimeout or waits.expectedNodeToNodeMessageDeliveryTime()
await eventually(checkMessageReceived, msg, reciever, method,
retryWait=.1,
timeout=timeout,
ratchetSteps=10)
def sendMessageToAll(nodes,
sender: Node,
msg: Optional[Tuple] = None):
"""
Sends message from one node to all others
:param nodes:
:param sender: sender
:param msg: optional message - by default random one generated
:return:
"""
for node in nodes:
if node != sender:
sendMessage(sender, node, msg)
async def sendMessageAndCheckDeliveryToAll(nodes,
sender: Node,
msg: Optional[Tuple] = None,
method=None,
customTimeout=None):
"""
    Sends a message from one node to all others and checks that it was delivered
:param nodes:
:param sender: sender
:param msg: optional message - by default random one generated
:param customTimeout:
:return:
"""
customTimeout = customTimeout or waits.expectedNodeToAllNodesMessageDeliveryTime(
len(nodes))
for node in nodes:
if node != sender:
await sendMessageAndCheckDelivery(sender, node, msg, method, customTimeout)
break
def checkMessageReceived(msg, receiver, method: str = None):
allMsgs = getAllMsgReceivedForNode(receiver, method)
assert msg in allMsgs
def addNodeBack(node_set,
looper: Looper,
node: Node,
tconf,
tdir) -> TestNode:
config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
restartedNode = TestNode(node.name,
config_helper=config_helper,
config=tconf,
ha=node.nodestack.ha,
cliha=node.clientstack.ha)
node_set.append(restartedNode)
looper.add(restartedNode)
return restartedNode
def checkPropagateReqCountOfNode(node: TestNode, digest: str):
assert digest in node.requests
assert node.quorums.propagate.is_reached(
len(node.requests[digest].propagates))
def requestReturnedToNode(node: TestNode, key: str,
instId: int):
params = getAllArgs(node, node.processOrdered)
# Skipping the view no and time from each ordered request
recvdOrderedReqs = [
(p['ordered'].instId, p['ordered'].valid_reqIdr[0]) for p in params]
expected = (instId, key)
return expected in recvdOrderedReqs
def checkRequestReturnedToNode(node: TestNode, key: str,
instId: int):
assert requestReturnedToNode(node, key, instId)
def checkRequestNotReturnedToNode(node: TestNode, key: str,
instId: int):
assert not requestReturnedToNode(node, key, instId)
def check_request_is_not_returned_to_nodes(txnPoolNodeSet, request):
instances = range(getNoInstances(len(txnPoolNodeSet)))
for node, inst_id in itertools.product(txnPoolNodeSet, instances):
checkRequestNotReturnedToNode(node,
request.key,
inst_id)
def checkPrePrepareReqSent(replica: TestReplica, req: Request):
prePreparesSent = getAllArgs(replica._ordering_service,
replica._ordering_service.send_pre_prepare)
assert (req.digest,) in \
[p["ppReq"].reqIdr for p in prePreparesSent]
def checkPrePrepareReqRecvd(replicas: Iterable[TestReplica],
expectedRequest: PrePrepare):
for replica in replicas:
params = getAllArgs(replica._ordering_service, replica._ordering_service._can_process_pre_prepare)
assert expectedRequest.reqIdr in [p['pre_prepare'].reqIdr for p in params]
def checkPrepareReqSent(replica: TestReplica, key: str,
view_no: int):
paramsList = getAllArgs(replica._ordering_service, replica._ordering_service._can_prepare)
rv = getAllReturnVals(replica._ordering_service,
replica._ordering_service._can_prepare)
args = [p["ppReq"].reqIdr for p in paramsList if p["ppReq"].viewNo == view_no]
assert (key,) in args
idx = args.index((key,))
assert rv[idx]
def checkSufficientPrepareReqRecvd(replica: TestReplica, viewNo: int,
ppSeqNo: int):
key = (viewNo, ppSeqNo)
assert key in replica._ordering_service.prepares
assert len(replica._ordering_service.prepares[key][1]) >= replica.quorums.prepare.value
def checkSufficientCommitReqRecvd(replicas: Iterable[TestReplica], viewNo: int,
ppSeqNo: int):
for replica in replicas:
key = (viewNo, ppSeqNo)
assert key in replica._ordering_service.commits
received = len(replica._ordering_service.commits[key][1])
minimum = replica.quorums.commit.value
assert received > minimum
def checkViewNoForNodes(nodes: Iterable[TestNode], expectedViewNo: int = None):
"""
Checks if all the given nodes have the expected view no
:param nodes: The nodes to check for
:param expectedViewNo: the view no that the nodes are expected to have
:return:
"""
viewNos = set()
for node in nodes:
logger.debug("{}'s view no is {}".format(node, node.master_replica.viewNo))
viewNos.add(node.master_replica.viewNo)
assert len(viewNos) == 1, 'Expected 1, but got {}. ' \
'ViewNos: {}'.format(len(viewNos), [(n.name, n.master_replica.viewNo) for n in nodes])
vNo, = viewNos
if expectedViewNo is not None:
assert vNo >= expectedViewNo, \
'Expected at least {}, but got {}'.format(expectedViewNo, vNo)
return vNo
def waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=None,
customTimeout=None):
"""
    Waits for nodes to come to the same view.
    Raises an exception when the time is out.
"""
timeout = customTimeout or waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
return looper.run(eventually(checkViewNoForNodes,
txnPoolNodeSet,
expectedViewNo,
timeout=timeout))
def getNodeSuspicions(node: TestNode, code: int = None):
params = getAllArgs(node, TestNode.reportSuspiciousNode)
if params and code is not None:
params = [param for param in params
if 'code' in param and param['code'] == code]
return params
def checkDiscardMsg(processors, discardedMsg,
reasonRegexp, *exclude):
if not exclude:
exclude = []
for p in filterNodeSet(processors, exclude):
last = p.spylog.getLastParams(p.discard, required=False)
assert last
assert last['msg'] == discardedMsg
assert reasonRegexp in last['reason']
def checkMasterReplicaDiscardMsg(processors, discardedMsg,
reasonRegexp, *exclude):
if not exclude:
exclude = []
for p in filterNodeSet(processors, exclude):
stasher = p.master_replica.stasher
last = stasher.spylog.getLastParams(stasher.discard, required=False)
assert last
assert last['msg'] == discardedMsg
assert reasonRegexp in last['reason']
def countDiscarded(processor, reasonPat):
c = 0
for entry in processor.spylog.getAll(processor.discard):
if 'reason' in entry.params and (
(isinstance(
entry.params['reason'],
str) and reasonPat in entry.params['reason']),
(reasonPat in str(
entry.params['reason']))):
c += 1
return c
def filterNodeSet(nodeSet, exclude: List[Union[str, Node]]):
"""
Return a set of nodes with the nodes in exclude removed.
:param nodeSet: the set of nodes
:param exclude: the list of nodes or node names to exclude
:return: the filtered nodeSet
"""
return [n for n in nodeSet
if n not in
[nodeSet[x] if isinstance(x, str) else x for x in exclude]]
def whitelistNode(toWhitelist: str, frm: Sequence[TestNode], *codes):
for node in frm:
node.whitelistNode(toWhitelist, *codes)
def whitelistClient(toWhitelist: str, frm: Sequence[TestNode], *codes):
for node in frm:
node.whitelistClient(toWhitelist, *codes)
def assertExp(condition):
assert condition
def assert_eq(actual, expected):
assert actual == expected
def assert_in(value, collection):
assert value in collection
def assertFunc(func):
assert func()
def checkLedgerEquality(ledger1, ledger2):
assertLength(ledger1, ledger2.size)
assertEquality(ledger1.root_hash, ledger2.root_hash)
assertEquality(ledger1.uncommitted_root_hash, ledger2.uncommitted_root_hash)
def checkAllLedgersEqual(*ledgers):
for l1, l2 in combinations(ledgers, 2):
checkLedgerEquality(l1, l2)
def checkStateEquality(state1, state2):
if state1 is None:
return state2 is None
assertEquality(state1.as_dict, state2.as_dict)
assertEquality(state1.committedHeadHash, state2.committedHeadHash)
assertEquality(state1.committedHead, state2.committedHead)
def check_seqno_db_equality(db1, db2):
if db1._keyValueStorage._db is None or db2._keyValueStorage._db is None:
return False
assert db1.size == db2.size, \
"{} != {}".format(db1.size, db2.size)
assert {bytes(k): bytes(v) for k, v in db1._keyValueStorage.iterator()} == \
{bytes(k): bytes(v) for k, v in db2._keyValueStorage.iterator()}
def check_primaries_equality(node1, node2):
assert node1.primaries == node2.primaries, \
"{} != {}, Node1: {}; Node2: {}".format(node1.primaries, node2.primaries, node1, node2)
def check_last_ordered_3pc(node1, node2):
master_replica_1 = node1.master_replica
master_replica_2 = node2.master_replica
assert master_replica_1.last_ordered_3pc == master_replica_2.last_ordered_3pc, \
"{} != {} Node1: {}, Node2: {}".format(master_replica_1.last_ordered_3pc,
master_replica_2.last_ordered_3pc,
node1, node2)
return master_replica_1.last_ordered_3pc
def check_last_ordered_3pc_backup(node1, node2):
assert len(node1.replicas) == len(node2.replicas)
for i in range(1, len(node1.replicas)):
replica1 = node1.replicas[i]
replica2 = node2.replicas[i]
assert replica1.last_ordered_3pc == replica2.last_ordered_3pc, \
"{}: {} != {}: {}".format(replica1, replica1.last_ordered_3pc,
replica2, replica2.last_ordered_3pc)
def check_view_no(node1, node2):
assert node1.master_replica.viewNo == node2.master_replica.viewNo, \
"{} != {}".format(node1.master_replica.viewNo, node2.master_replica.viewNo)
def check_last_ordered_3pc_on_all_replicas(nodes, last_ordered_3pc):
for n in nodes:
for r in n.replicas.values():
assert r.last_ordered_3pc == last_ordered_3pc, \
"{} != {}, Replica: {}".format(r.last_ordered_3pc,
last_ordered_3pc, r)
def check_last_ordered_3pc_on_master(nodes, last_ordered_3pc):
for n in nodes:
assert n.master_replica.last_ordered_3pc == last_ordered_3pc, \
"{} != {}".format(n.master_replica.last_ordered_3pc,
last_ordered_3pc)
def check_last_ordered_3pc_on_backup(nodes, last_ordered_3pc):
for n in nodes:
for i, r in n.replicas.items():
if i != 0:
assert r.last_ordered_3pc == last_ordered_3pc, \
"{} != {}".format(r.last_ordered_3pc,
last_ordered_3pc)
def randomText(size):
return ''.join(random.choice(string.ascii_letters) for _ in range(size))
def mockGetInstalledDistributions(packages):
ret = []
for pkg in packages:
obj = type('', (), {})()
obj.key = pkg
ret.append(obj)
return ret
def mockImportModule(moduleName):
obj = type(moduleName, (), {})()
obj.send_message = lambda *args: None
return obj
def initDirWithGenesisTxns(
dirName,
tconf,
tdirWithPoolTxns=None,
tdirWithDomainTxns=None,
new_pool_txn_file=None,
new_domain_txn_file=None):
os.makedirs(dirName, exist_ok=True)
if tdirWithPoolTxns:
new_pool_txn_file = new_pool_txn_file or tconf.poolTransactionsFile
copyfile(
os.path.join(
tdirWithPoolTxns, genesis_txn_file(
tconf.poolTransactionsFile)), os.path.join(
dirName, genesis_txn_file(new_pool_txn_file)))
if tdirWithDomainTxns:
new_domain_txn_file = new_domain_txn_file or tconf.domainTransactionsFile
copyfile(
os.path.join(
tdirWithDomainTxns, genesis_txn_file(
tconf.domainTransactionsFile)), os.path.join(
dirName, genesis_txn_file(new_domain_txn_file)))
def stopNodes(nodes: List[TestNode], looper=None, ensurePortsFreedUp=True):
if ensurePortsFreedUp:
assert looper, 'Need a looper to make sure ports are freed up'
for node in nodes:
node.stop()
if ensurePortsFreedUp:
ports = [[n.nodestack.ha[1], n.clientstack.ha[1]] for n in nodes]
waitUntilPortIsAvailable(looper, ports)
def waitUntilPortIsAvailable(looper, ports, timeout=5):
ports = itertools.chain(*ports)
def chk():
for port in ports:
checkPortAvailable(("", port))
looper.run(eventually(chk, retryWait=.5, timeout=timeout))
def run_script(script, *args):
s = os.path.join(os.path.dirname(__file__), '../../scripts/' + script)
command = [executable, s]
command.extend(args)
    with Popen(command) as p:
sleep(4)
p.send_signal(SIGINT)
p.wait(timeout=1)
assert p.poll() == 0, 'script failed'
def viewNoForNodes(nodes):
viewNos = {node.viewNo for node in nodes}
assert 1 == len(viewNos)
return next(iter(viewNos))
def primaryNodeNameForInstance(nodes, instanceId):
primaryNames = {node.replicas[instanceId].primaryName for node in nodes}
assert 1 == len(primaryNames)
primaryReplicaName = next(iter(primaryNames))
return primaryReplicaName[:-2]
def nodeByName(nodes, name):
for node in nodes:
if node.name == name:
return node
raise Exception("Node with the name '{}' has not been found.".format(name))
def send_pre_prepare(view_no, pp_seq_no, nodes,
state_root=None, txn_root=None):
pre_prepare = PrePrepare(
0,
view_no,
pp_seq_no,
get_utc_epoch(),
["requests digest"],
0,
"random digest",
DOMAIN_LEDGER_ID,
state_root or '0' * 44,
txn_root or '0' * 44,
0,
True
)
primary_node = getPrimaryReplica(nodes).node
non_primary_nodes = set(nodes) - {primary_node}
sendMessageToAll(nodes, primary_node, pre_prepare)
for non_primary_node in non_primary_nodes:
sendMessageToAll(nodes, non_primary_node, pre_prepare)
def send_prepare(view_no, pp_seq_no, nodes, state_root=None, txn_root=None):
prepare = Prepare(
0,
view_no,
pp_seq_no,
get_utc_epoch(),
"random digest",
state_root or '0' * 44,
txn_root or '0' * 44
)
primary_node = getPrimaryReplica(nodes).node
sendMessageToAll(nodes, primary_node, prepare)
def send_commit(view_no, pp_seq_no, nodes):
commit = Commit(
0,
view_no,
pp_seq_no)
primary_node = getPrimaryReplica(nodes).node
sendMessageToAll(nodes, primary_node, commit)
def get_key_from_req(req: dict):
return Request(identifier=req[f.IDENTIFIER.nm],
reqId=req[f.REQ_ID.nm],
operation=req[OPERATION],
protocolVersion=req[f.PROTOCOL_VERSION.nm],
signature=req.get(f.SIG.nm),
taaAcceptance=req.get(f.TAA_ACCEPTANCE)
).key
def chk_all_funcs(looper, funcs, acceptable_fails=0, retry_wait=None,
timeout=None, override_eventually_timeout=False):
# TODO: Move this logic to eventuallyAll
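    # Runs every function in `funcs` on each attempt and tolerates at most
    # `acceptable_fails` failures per attempt.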
def chk():
fails = 0
last_ex = None
for func in funcs:
try:
func()
except Exception as ex:
fails += 1
if fails >= acceptable_fails:
logger.debug('Too many fails, the last one: {}'.format(repr(ex)))
last_ex = ex
assert fails <= acceptable_fails, '{} out of {} failed. Last exception:' \
' {}'.format(fails, len(funcs), last_ex)
kwargs = {}
if retry_wait:
kwargs['retryWait'] = retry_wait
if timeout:
kwargs['timeout'] = timeout
if override_eventually_timeout:
kwargs['override_timeout_limit'] = override_eventually_timeout
looper.run(eventually(chk, **kwargs))
def check_request_ordered(node, request: Request):
# it's ok to iterate through all txns since this is a test
for seq_no, txn in node.domainLedger.getAllTxn():
if get_req_id(txn) is None:
continue
if get_from(txn) is None:
continue
if get_req_id(txn) != request.reqId:
continue
if get_from(txn) != request.identifier:
continue
return True
raise ValueError('{} request not ordered by node {}'.format(request, node.name))
def wait_for_requests_ordered(looper, nodes, requests):
node_count = len(nodes)
timeout_per_request = waits.expectedTransactionExecutionTime(node_count)
total_timeout = (1 + len(requests) / 10) * timeout_per_request
coros = [partial(check_request_ordered,
node,
request)
for (node, request) in list(itertools.product(nodes, requests))]
looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=total_timeout))
def create_new_test_node(test_node_class, node_config_helper_class, name, conf,
tdir, plugin_paths, bootstrap_cls=None,
node_ha=None, client_ha=None):
config_helper = node_config_helper_class(name, conf, chroot=tdir)
return test_node_class(name,
config_helper=config_helper,
config=conf,
pluginPaths=plugin_paths,
ha=node_ha,
cliha=client_ha,
bootstrap_cls=bootstrap_cls)
# ####### SDK
def sdk_gen_request(operation, protocol_version=CURRENT_PROTOCOL_VERSION,
identifier=None, **kwargs):
    # Question: Why is this method called sdk_gen_request? It does not use
    # the indy-sdk.
return Request(operation=operation, reqId=random.randint(10, 1000000000),
protocolVersion=protocol_version, identifier=identifier,
**kwargs)
def sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did):
_, new_steward_did = sdk_wallet_new_steward
node_ip = '{}.{}.{}.{}'.format(
random.randint(1, 240),
random.randint(1, 240),
random.randint(1, 240),
random.randint(1, 240))
data = {
'alias': node_alias,
'client_port': 50001,
'node_port': 50002,
'node_ip': node_ip,
'client_ip': node_ip,
'services': []
}
req = looper.loop.run_until_complete(
build_node_request(new_steward_did, node_did, json.dumps(data)))
return Request(**json.loads(req))
def sdk_random_request_objects(count, protocol_version, identifier=None,
**kwargs):
ops = random_requests(count)
return [sdk_gen_request(op, protocol_version=protocol_version,
identifier=identifier, **kwargs) for op in ops]
def sdk_sign_request_objects(looper, sdk_wallet, reqs: Sequence):
wallet_h, did = sdk_wallet
reqs_str = [json.dumps(req.as_dict) for req in reqs]
reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs
def sdk_multi_sign_request_objects(looper, sdk_wallets, reqs: Sequence):
reqs_str = [json.dumps(req.as_dict) for req in reqs]
for sdk_wallet in sdk_wallets:
wallet_h, did = sdk_wallet
reqs_str = [looper.loop.run_until_complete(multi_sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs_str
def sdk_sign_request_strings(looper, sdk_wallet, reqs: Sequence):
wallet_h, did = sdk_wallet
reqs_str = [json.dumps(req) for req in reqs]
reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs
def sdk_multisign_request_object(looper, sdk_wallet, req):
wh, did = sdk_wallet
return looper.loop.run_until_complete(multi_sign_request(wh, did, req))
def sdk_multisign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None):
wh, did = sdk_wallet
reqId = reqId or random.randint(10, 100000)
request = Request(operation=op, reqId=reqId,
protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did,
taaAcceptance=taa_acceptance,
endorser=endorser)
req_str = json.dumps(request.as_dict)
resp = looper.loop.run_until_complete(multi_sign_request(wh, did, req_str))
return json.loads(resp)
def sdk_signed_random_requests(looper, sdk_wallet, count):
_, did = sdk_wallet
reqs_obj = sdk_random_request_objects(count, identifier=did,
protocol_version=CURRENT_PROTOCOL_VERSION)
return sdk_sign_request_objects(looper, sdk_wallet, reqs_obj)
def sdk_send_signed_requests(pool_h, signed_reqs: Sequence):
return [(json.loads(req),
asyncio.ensure_future(submit_request(pool_h, req)))
for req in signed_reqs]
def sdk_send_random_requests(looper, pool_h, sdk_wallet, count: int):
reqs = sdk_signed_random_requests(looper, sdk_wallet, count)
return sdk_send_signed_requests(pool_h, reqs)
def sdk_send_random_request(looper, pool_h, sdk_wallet):
rets = sdk_send_random_requests(looper, pool_h, sdk_wallet, 1)
return rets[0]
def sdk_send_random_pool_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
node_alias = random_string(7)
node_did = SimpleSigner(seed=random_string(32).encode()).identifier
reqs = [sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did) for _ in range(count)]
return [sdk_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req) for req in reqs]
def sdk_send_random_pool_and_domain_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
node_alias = random_string(7)
node_did = SimpleSigner(seed=random_string(32).encode()).identifier
req_gens = [
lambda: sdk_gen_request(random_requests(1)[0], identifier=sdk_wallet_new_steward[1]),
lambda: sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did),
]
res = []
for i in range(count):
req = req_gens[i % len(req_gens)]()
res.append(sdk_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req))
looper.runFor(0.1) # Give nodes some time to start ordering, so that requests are really alternating
return res
def sdk_sign_and_submit_req(pool_handle, sdk_wallet, req):
wallet_handle, sender_did = sdk_wallet
return json.loads(req), asyncio.ensure_future(
sign_and_submit_request(pool_handle, wallet_handle, sender_did, req))
def sdk_sign_and_submit_req_obj(looper, pool_handle, sdk_wallet, req_obj):
s_req = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
return sdk_send_signed_requests(pool_handle, [s_req])[0]
def sdk_sign_and_submit_op(looper, pool_handle, sdk_wallet, op):
_, did = sdk_wallet
req_obj = sdk_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION,
identifier=did)
s_req = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
return sdk_send_signed_requests(pool_handle, [s_req])[0]
def sdk_get_reply(looper, sdk_req_resp, timeout=None):
req_json, resp_task = sdk_req_resp
    # TODO: change the timeout evaluation logic once the SDK allows tuning the timeout from outside
if timeout is None:
timeout = waits.expectedTransactionExecutionTime(7)
try:
resp = looper.run(asyncio.wait_for(resp_task, timeout=timeout))
resp = json.loads(resp)
except IndyError as e:
resp = e.error_code
except TimeoutError as e:
resp = ErrorCode.PoolLedgerTimeout
return req_json, resp
# TODO: Check places where sdk_get_replies is used without sdk_check_reply.
# We need to be sure that the test behaviour does not need to check response
# validity.
def sdk_get_replies(looper, sdk_req_resp: Sequence, timeout=None):
resp_tasks = [resp for _, resp in sdk_req_resp]
    # TODO: change the timeout evaluation logic once the SDK allows tuning the timeout from outside
if timeout is None:
timeout = waits.expectedTransactionExecutionTime(7)
def get_res(task, done_list):
if task in done_list:
try:
resp = json.loads(task.result())
except IndyError as e:
resp = e.error_code
else:
resp = ErrorCode.PoolLedgerTimeout
return resp
done, pending = looper.run(asyncio.wait(resp_tasks, timeout=timeout))
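    # Tasks still pending after the timeout are cancelled below and reported by
    # get_res as the PoolLedgerTimeout error code.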
if pending:
for task in pending:
task.cancel()
ret = [(req, get_res(resp, done)) for req, resp in sdk_req_resp]
return ret
def sdk_check_reply(req_res):
req, res = req_res
if isinstance(res, ErrorCode):
if res == ErrorCode.PoolLedgerTimeout:
raise PoolLedgerTimeoutException('Got PoolLedgerTimeout for request {}'
.format(req))
else:
raise CommonSdkIOException('Got an error with code {} for request {}'
.format(res, req))
if not isinstance(res, dict):
raise CommonSdkIOException("Unexpected response format {}".format(res))
def _parse_op(res_dict):
if res_dict['op'] == REQNACK:
raise RequestNackedException('ReqNack of id {}. Reason: {}'
.format(req['reqId'], res_dict['reason']))
if res_dict['op'] == REJECT:
raise RequestRejectedException('Reject of id {}. Reason: {}'
.format(req['reqId'], res_dict['reason']))
if 'op' in res:
_parse_op(res)
else:
for resps in res.values():
if isinstance(resps, str):
_parse_op(json.loads(resps))
elif isinstance(resps, dict):
_parse_op(resps)
else:
raise CommonSdkIOException("Unexpected response format {}".format(res))
def sdk_get_and_check_replies(looper, sdk_req_resp: Sequence, timeout=None):
rets = []
for req_res in sdk_get_replies(looper, sdk_req_resp, timeout):
sdk_check_reply(req_res)
rets.append(req_res)
return rets
def sdk_eval_timeout(req_count: int, node_count: int,
customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0):
timeout_per_request = customTimeoutPerReq or waits.expectedTransactionExecutionTime(node_count)
timeout_per_request += add_delay_to_timeout
    # Here we try to account for the time needed to execute N requests:
    # total_timeout should satisfy
    # timeout_per_request < total_timeout < timeout_per_request * N.
    # We cannot simply take (timeout_per_request * N) because it is far too big
    # (for timeout_per_request=5 and N=10 that would give total_timeout=50sec),
    # so let's start with a simple formula:
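    # For example, timeout_per_request=5 and req_count=10 gives
    # (1 + 10 / 10) * 5 = 10 seconds.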
return (1 + req_count / 10) * timeout_per_request
def sdk_send_and_check(signed_reqs, looper, txnPoolNodeSet, pool_h, timeout=None):
if not timeout:
timeout = sdk_eval_timeout(len(signed_reqs), len(txnPoolNodeSet))
results = sdk_send_signed_requests(pool_h, signed_reqs)
sdk_replies = sdk_get_replies(looper, results, timeout=timeout)
for req_res in sdk_replies:
sdk_check_reply(req_res)
return sdk_replies
def sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, count,
customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0,
override_timeout_limit=False, total_timeout=None):
sdk_reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, count)
if not total_timeout:
total_timeout = sdk_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet),
customTimeoutPerReq=customTimeoutPerReq,
add_delay_to_timeout=add_delay_to_timeout)
sdk_replies = sdk_get_replies(looper, sdk_reqs, timeout=total_timeout)
for req_res in sdk_replies:
sdk_check_reply(req_res)
return sdk_replies
def sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
num_reqs, num_batches=1, **kwargs):
# This method assumes that `num_reqs` <= num_batches*MaxbatchSize
if num_reqs < num_batches:
raise BaseException(
'sdk_send_batches_of_random_and_check method assumes that `num_reqs` <= num_batches*MaxbatchSize')
if num_batches == 1:
return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, num_reqs, **kwargs)
reqs_in_batch = num_reqs // num_batches
reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches
sdk_replies = []
for _ in range(num_batches - 1):
sdk_replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool, sdk_wallet,
reqs_in_batch, **kwargs))
sdk_replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool, sdk_wallet,
reqs_in_last_batch, **kwargs))
return sdk_replies
def sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
num_reqs, num_batches=1, timeout=Max3PCBatchWait):
if num_reqs < num_batches:
raise BaseException(
            'sdk_send_batches_of_random method assumes that `num_reqs` <= num_batches*MaxbatchSize')
if num_batches == 1:
sdk_reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, num_reqs)
looper.runFor(timeout)
return sdk_reqs
reqs_in_batch = num_reqs // num_batches
reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches
sdk_reqs = []
for _ in range(num_batches - 1):
sdk_reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_batch))
looper.runFor(timeout)
sdk_reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_last_batch))
looper.runFor(timeout)
return sdk_reqs
def sdk_sign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None):
wallet_h, did = sdk_wallet
reqId = reqId or random.randint(10, 100000)
request = Request(operation=op, reqId=reqId,
protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did,
taaAcceptance=taa_acceptance,
endorser=endorser)
req_str = json.dumps(request.as_dict)
resp = looper.loop.run_until_complete(sign_request(wallet_h, did, req_str))
return json.loads(resp)
def sdk_check_request_is_not_returned_to_nodes(looper, nodeSet, request):
instances = range(getNoInstances(len(nodeSet)))
coros = []
for node, inst_id in itertools.product(nodeSet, instances):
c = partial(checkRequestNotReturnedToNode,
node=node,
identifier=request['identifier'],
reqId=request['reqId'],
instId=inst_id
)
coros.append(c)
timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=timeout))
def sdk_json_to_request_object(json_req):
return Request(identifier=json_req.get('identifier', None),
reqId=json_req['reqId'],
operation=json_req['operation'],
signature=json_req['signature'] if 'signature' in json_req else None,
protocolVersion=json_req['protocolVersion'] if 'protocolVersion' in json_req else None,
taaAcceptance=json_req.get('taaAcceptance', None))
def sdk_json_couples_to_request_list(json_couples):
req_list = []
for json_couple in json_couples:
req_list.append(sdk_json_to_request_object(json_couple[0]))
return req_list
def sdk_get_bad_response(looper, reqs, exception, message):
with pytest.raises(exception) as e:
sdk_get_and_check_replies(looper, reqs)
assert message in e._excinfo[1].args[0]
def sdk_set_protocol_version(looper, version=CURRENT_PROTOCOL_VERSION):
looper.loop.run_until_complete(set_protocol_version(version))
# Context managers to be used with tconf fixture
@contextmanager
def perf_monitor_disabled(tconf):
old_unsafe = tconf.unsafe.copy()
tconf.unsafe.add("disable_view_change")
yield tconf
tconf.unsafe = old_unsafe
@contextmanager
def view_change_timeout(tconf, vc_timeout, propose_timeout=None):
old_view_change_timeout = tconf.NEW_VIEW_TIMEOUT
old_propose_timeout = tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT
old_propagate_request_delay = tconf.PROPAGATE_REQUEST_DELAY
tconf.NEW_VIEW_TIMEOUT = vc_timeout
tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = vc_timeout if propose_timeout is None else propose_timeout
tconf.PROPAGATE_REQUEST_DELAY = 0
yield tconf
tconf.NEW_VIEW_TIMEOUT = old_view_change_timeout
tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = old_propose_timeout
tconf.PROPAGATE_REQUEST_DELAY = old_propagate_request_delay
@contextmanager
def max_3pc_batch_limits(tconf, size, wait=10000):
old_size = tconf.Max3PCBatchSize
old_wait = tconf.Max3PCBatchWait
tconf.Max3PCBatchSize = size
tconf.Max3PCBatchWait = wait
yield tconf
tconf.Max3PCBatchSize = old_size
tconf.Max3PCBatchWait = old_wait
@contextmanager
def freshness(tconf, enabled, timeout):
old_update_state = tconf.UPDATE_STATE_FRESHNESS
old_timeout = tconf.STATE_FRESHNESS_UPDATE_INTERVAL
tconf.UPDATE_STATE_FRESHNESS = enabled
tconf.STATE_FRESHNESS_UPDATE_INTERVAL = timeout
yield tconf
tconf.UPDATE_STATE_FRESHNESS = old_update_state
tconf.STATE_FRESHNESS_UPDATE_INTERVAL = old_timeout
@contextmanager
def primary_disconnection_time(tconf, value):
old_tolarate_disconnection = tconf.ToleratePrimaryDisconnection
tconf.ToleratePrimaryDisconnection = value
yield tconf
tconf.ToleratePrimaryDisconnection = old_tolarate_disconnection
@contextmanager
def acc_monitor(tconf, acc_monitor_enabled=True, acc_monitor_timeout=3, acc_monitor_delta=0):
old_timeout = tconf.ACC_MONITOR_TIMEOUT
old_delta = tconf.ACC_MONITOR_TXN_DELTA_K
old_acc_monitor_enabled = tconf.ACC_MONITOR_ENABLED
tconf.ACC_MONITOR_TIMEOUT = acc_monitor_timeout
tconf.ACC_MONITOR_TXN_DELTA_K = acc_monitor_delta
tconf.ACC_MONITOR_ENABLED = acc_monitor_enabled
yield tconf
tconf.ACC_MONITOR_TIMEOUT = old_timeout
tconf.ACC_MONITOR_TXN_DELTA_K = old_delta
tconf.ACC_MONITOR_ENABLED = old_acc_monitor_enabled
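# Illustrative usage sketch (added for clarity; not part of the original helpers).
# The tconf context managers above temporarily override config values and restore
# them on exit. A test would typically wrap its body as in the hypothetical example
# below; `looper`, `tconf`, `txnPoolNodeSet`, `sdk_pool_handle` and
# `sdk_wallet_client` are assumed to be the usual pytest fixtures.
def example_send_with_single_request_batches(looper, tconf, txnPoolNodeSet,
                                              sdk_pool_handle, sdk_wallet_client):
    # Force one request per 3PC batch inside the block; the previous
    # Max3PCBatchSize/Max3PCBatchWait values are restored on exit.
    with max_3pc_batch_limits(tconf, size=1) as tconf:
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_client, 5)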
def create_pre_prepare_params(state_root,
ledger_id=DOMAIN_LEDGER_ID,
txn_root=None,
timestamp=None,
bls_multi_sig=None,
view_no=0,
pool_state_root=None,
pp_seq_no=0,
inst_id=0,
audit_txn_root=None,
reqs=None,
bls_multi_sigs=None):
if timestamp is None:
timestamp = get_utc_epoch()
req_idrs = [req.key for req in reqs] if reqs is not None else [random_string(32)]
digest = OrderingService.generate_pp_digest(req_idrs, view_no, timestamp)
params = [inst_id,
view_no,
pp_seq_no,
timestamp,
req_idrs,
init_discarded(0),
digest,
ledger_id,
state_root,
txn_root or '1' * 32,
0,
True,
pool_state_root or generate_state_root(),
audit_txn_root or generate_state_root()]
if bls_multi_sig:
# Pass None for backward compatibility
params.append(None)
params.append([bls_multi_sig.as_list()])
elif bls_multi_sigs is not None:
# Pass None for backward compatibility
params.append(None)
params.append([sig.as_list() for sig in bls_multi_sigs])
return params
def create_pre_prepare_no_bls(state_root, view_no=0, pool_state_root=None, pp_seq_no=0, inst_id=0, audit_txn_root=None):
params = create_pre_prepare_params(state_root=state_root,
view_no=view_no,
pool_state_root=pool_state_root,
pp_seq_no=pp_seq_no,
inst_id=inst_id,
audit_txn_root=audit_txn_root)
return PrePrepare(*params)
def create_commit_params(view_no, pp_seq_no, inst_id=0):
return [inst_id, view_no, pp_seq_no]
def create_commit_no_bls_sig(req_key, inst_id=0):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no, inst_id=inst_id)
return Commit(*params)
def create_commit_with_bls_sig(req_key, bls_sig):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
# Use ' ' as BLS_SIG for backward-compatibility as BLS_SIG in COMMIT is optional but not Nullable
params.append(' ')
params.append({DOMAIN_LEDGER_ID: bls_sig})
return Commit(*params)
def create_commit_with_bls_sigs(req_key, bls_sig, lid):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
# Use ' ' as BLS_SIG for backward-compatibility as BLS_SIG in COMMIT is optional but not Nullable
params.append(' ')
params.append({str(lid): bls_sig})
return Commit(*params)
def create_commit_bls_sig(bls_bft, req_key, pre_prepare):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
params = bls_bft.update_commit(params, pre_prepare)
return Commit(*params)
def create_prepare_params(view_no, pp_seq_no, state_root, inst_id=0):
return [inst_id,
view_no,
pp_seq_no,
get_utc_epoch(),
"random digest",
state_root,
'1' * 32]
def create_prepare_from_pre_prepare(pre_prepare):
params = [pre_prepare.instId,
pre_prepare.viewNo,
pre_prepare.ppSeqNo,
pre_prepare.ppTime,
pre_prepare.digest,
pre_prepare.stateRootHash,
pre_prepare.txnRootHash,
pre_prepare.auditTxnRootHash]
return Prepare(*params)
def create_commit_from_pre_prepare(pre_prepare):
params = [pre_prepare.instId,
pre_prepare.viewNo,
pre_prepare.ppSeqNo]
return Commit(*params)
def create_prepare(req_key, state_root, inst_id=0):
view_no, pp_seq_no = req_key
params = create_prepare_params(view_no, pp_seq_no, state_root, inst_id=inst_id)
return Prepare(*params)
def generate_state_root():
return base58.b58encode(os.urandom(32)).decode("utf-8")
def init_discarded(value=None):
"""init discarded field with value and return message like representation"""
discarded = []
if value:
discarded.append(value)
return invalid_index_serializer.serialize(discarded, toBytes=False)
def incoming_3pc_msgs_count(nodes_count: int = 4) -> int:
pre_prepare = 1 # Message from Primary
prepares = nodes_count - 2 # Messages from all nodes exclude primary and self node
commits = nodes_count - 1 # Messages from all nodes exclude self node
# The primary node receives the same number of messages. Doesn't get pre-prepare,
# but gets one more prepare
return pre_prepare + prepares + commits
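# Worked example (added for clarity): for the default 4-node pool this gives
# 1 pre-prepare + 2 prepares + 3 commits = 6 incoming 3PC messages per batch.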
def check_missing_pre_prepares(nodes, count):
assert all(count <= len(replica._ordering_service.prePreparesPendingPrevPP)
for replica in getNonPrimaryReplicas(nodes, instId=0))
class MockTimestamp:
def __init__(self, value=datetime.utcnow()):
self.value = value
def __call__(self):
return self.value
class MockTimer(QueueTimer):
def __init__(self, start_time: int = 0):
self._ts = MockTimestamp(start_time)
QueueTimer.__init__(self, self._ts)
def set_time(self, value):
"""
Update time and run scheduled callbacks afterwards
"""
self._ts.value = value
self._log_time()
self.service()
def sleep(self, seconds):
"""
Simulate sleeping for given amount of seconds, and run scheduled callbacks afterwards
"""
self.set_time(self._ts.value + seconds)
def advance(self):
"""
Advance time to next scheduled callback and run that callback
"""
if not self._events:
return
event = self._pop_event()
self._ts.value = event.timestamp
self._log_time()
event.callback()
def advance_until(self, value):
"""
Advance time in steps until required value running scheduled callbacks in process
"""
while self._events and self._next_timestamp() <= value:
self.advance()
self._ts.value = value
def run_for(self, seconds):
"""
Simulate running for given amount of seconds, running scheduled callbacks at required timestamps
"""
self.advance_until(self._ts.value + seconds)
def wait_for(self, condition: Callable[[], bool], timeout: Optional = None, max_iterations: int = 10000):
"""
Advance time in steps until condition is reached, running scheduled callbacks in process
Throws TimeoutError if fail to reach condition (under required timeout if defined)
"""
counter = 0
deadline = self._ts.value + timeout if timeout else None
while self._events and not condition() and counter < max_iterations:
if deadline and self._next_timestamp() > deadline:
raise TimeoutError("Failed to reach condition in required time, {} iterations passed".format(counter))
self.advance()
counter += 1
if not condition():
if not self._events:
raise TimeoutError("Condition will be never reached, {} iterations passed".format(counter))
else:
raise TimeoutError("Failed to reach condition in {} iterations".format(max_iterations))
def run_to_completion(self, max_iterations: int = 10000):
"""
Advance time in steps until nothing is scheduled
"""
counter = 0
while self._events and counter < max_iterations:
self.advance()
counter += 1
if self._events:
raise TimeoutError("Failed to complete in {} iterations".format(max_iterations))
def _log_time(self):
# TODO: Probably better solution would be to replace real time in logs with virtual?
logger.info("Virtual time: {}".format(self._ts.value))
class TestStopwatch:
def __init__(self, timer: Optional[TimerService] = None):
self._get_current_time = timer.get_current_time if timer else perf_counter
self._start_time = self._get_current_time()
def start(self):
self._start_time = self._get_current_time()
def has_elapsed(self, expected_delay: float, tolerance: float = 0.1) -> bool:
elapsed = self._get_current_time() - self._start_time
return abs(expected_delay - elapsed) <= expected_delay * tolerance
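# Illustrative usage sketch (added for clarity; not part of the original helpers).
# MockTimer drives virtual time so tests never sleep for real, and TestStopwatch
# measures elapsed time against whatever timer it is given. The only assumption
# here is that QueueTimer.get_current_time() reports the injected timestamp,
# which is what TestStopwatch itself relies on.
def example_virtual_time_usage():
    timer = MockTimer()               # virtual clock starting at 0
    stopwatch = TestStopwatch(timer)
    timer.sleep(10)                   # advances virtual time by 10 seconds instantly
    assert stopwatch.has_elapsed(10)  # 10 virtual seconds elapsed, no real waiting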
class TestInternalBus(InternalBus):
def __init__(self):
super().__init__()
self.sent_messages = []
def send(self, message: Any, *args):
self.sent_messages.append(message)
super().send(message, *args)
class MockNetwork(ExternalBus):
def __init__(self):
super().__init__(self._send_message)
self.sent_messages = []
def _send_message(self, msg: Any, dst: ExternalBus.Destination):
self.sent_messages.append((msg, dst))
def connect(self, name: str):
self.update_connecteds(self.connecteds.union({name}))
def disconnect(self, name: str):
self.update_connecteds(self.connecteds.difference({name}))
def get_handler_by_type_wm(write_manager, h_type):
for h_l in write_manager.request_handlers.values():
for h in h_l:
if isinstance(h, h_type):
return h
def create_pool_txn_data(node_names: List[str],
crypto_factory: BlsFactoryCrypto,
get_free_port: Callable[[], int],
nodes_with_bls: Optional[int] = None):
nodeCount = len(node_names)
data = {'txns': [], 'seeds': {}, 'nodesWithBls': {}}
for i, node_name in zip(range(1, nodeCount + 1), node_names):
data['seeds'][node_name] = node_name + '0' * (32 - len(node_name))
steward_name = 'Steward' + str(i)
data['seeds'][steward_name] = steward_name + '0' * (32 - len(steward_name))
n_idr = SimpleSigner(seed=data['seeds'][node_name].encode()).identifier
s_idr = DidSigner(seed=data['seeds'][steward_name].encode())
data['txns'].append(
Member.nym_txn(nym=s_idr.identifier,
verkey=s_idr.verkey,
role=STEWARD,
name=steward_name,
seq_no=i)
)
node_txn = Steward.node_txn(steward_nym=s_idr.identifier,
node_name=node_name,
nym=n_idr,
ip='127.0.0.1',
node_port=get_free_port(),
client_port=get_free_port(),
client_ip='127.0.0.1',
services=[VALIDATOR],
seq_no=i)
if nodes_with_bls is None or i <= nodes_with_bls:
_, bls_key, bls_key_proof = crypto_factory.generate_bls_keys(
seed=data['seeds'][node_name])
get_payload_data(node_txn)[DATA][BLS_KEY] = bls_key
get_payload_data(node_txn)[DATA][BLS_KEY_PROOF] = bls_key_proof
data['nodesWithBls'][node_name] = True
data['txns'].append(node_txn)
# Add 4 Trustees
for i in range(4):
trustee_name = 'Trs' + str(i)
data['seeds'][trustee_name] = trustee_name + '0' * (
32 - len(trustee_name))
t_sgnr = DidSigner(seed=data['seeds'][trustee_name].encode())
data['txns'].append(
Member.nym_txn(nym=t_sgnr.identifier,
verkey=t_sgnr.verkey,
role=TRUSTEE,
name=trustee_name)
)
more_data_seeds = \
{
"Alice": "99999999999999999999999999999999",
"Jason": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"John": "dddddddddddddddddddddddddddddddd",
"Les": "ffffffffffffffffffffffffffffffff"
}
more_data_users = []
for more_name, more_seed in more_data_seeds.items():
signer = DidSigner(seed=more_seed.encode())
more_data_users.append(
Member.nym_txn(nym=signer.identifier,
verkey=signer.verkey,
name=more_name,
creator="5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC")
)
data['txns'].extend(more_data_users)
data['seeds'].update(more_data_seeds)
return data
def get_pp_seq_no(nodes: list, inst_id=0) -> int:
los = set([n.replicas._replicas[inst_id].last_ordered_3pc[1] for n in nodes])
assert len(los) == 1
return los.pop()
| 36.212698
| 120
| 0.649215
|
from datetime import datetime
import itertools
import os
import random
import string
from _signal import SIGINT
from contextlib import contextmanager
from functools import partial
from itertools import permutations, combinations
from shutil import copyfile
from sys import executable
from time import sleep, perf_counter
from typing import Tuple, Iterable, Dict, Optional, List, Any, Sequence, Union, Callable
import base58
import pytest
from indy.pool import set_protocol_version
from common.serializers.serialization import invalid_index_serializer
from crypto.bls.bls_factory import BlsFactoryCrypto
from plenum.common.event_bus import ExternalBus, InternalBus
from plenum.common.member.member import Member
from plenum.common.member.steward import Steward
from plenum.common.signer_did import DidSigner
from plenum.common.signer_simple import SimpleSigner
from plenum.common.timer import QueueTimer, TimerService
from plenum.config import Max3PCBatchWait
from psutil import Popen
import json
import asyncio
from indy.ledger import sign_and_submit_request, sign_request, submit_request, build_node_request, \
multi_sign_request
from indy.error import ErrorCode, IndyError
from ledger.genesis_txn.genesis_txn_file_util import genesis_txn_file
from plenum.common.constants import DOMAIN_LEDGER_ID, OP_FIELD_NAME, REPLY, REQNACK, REJECT, \
CURRENT_PROTOCOL_VERSION, STEWARD, VALIDATOR, TRUSTEE, DATA, BLS_KEY, BLS_KEY_PROOF
from plenum.common.exceptions import RequestNackedException, RequestRejectedException, CommonSdkIOException, \
PoolLedgerTimeoutException
from plenum.common.messages.node_messages import Reply, PrePrepare, Prepare, Commit
from plenum.common.txn_util import get_req_id, get_from, get_payload_data
from plenum.common.types import f, OPERATION
from plenum.common.util import getNoInstances, get_utc_epoch
from plenum.common.config_helper import PNodeConfigHelper
from plenum.common.request import Request
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.node import Node
from plenum.test import waits
from plenum.test.constants import BUY
from plenum.test.msgs import randomMsg
from plenum.test.spy_helpers import getLastClientReqReceivedForNode, getAllArgs, getAllReturnVals, \
getAllMsgReceivedForNode
from plenum.test.test_node import TestNode, TestReplica, \
getPrimaryReplica, getNonPrimaryReplicas
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventuallyAll, eventually
from stp_core.loop.looper import Looper
from stp_core.network.util import checkPortAvailable
logger = getlogger()
def ordinal(n):
return "%d%s" % (
n, "tsnrhtdd"[(n / 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
def random_string(length: int) -> str:
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
def send_reqs_batches_and_get_suff_replies(
looper: Looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
num_reqs: int,
num_batches=1,
**kwargs):
if num_batches == 1:
return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, num_reqs)
else:
requests = []
for _ in range(num_batches - 1):
requests.extend(
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, num_reqs // num_batches))
rem = num_reqs % num_batches
if rem == 0:
rem = num_reqs // num_batches
requests.extend(
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, rem))
return requests
def checkResponseCorrectnessFromNodes(receivedMsgs: Iterable, reqId: int,
fValue: int) -> bool:
msgs = [(msg[f.RESULT.nm][f.REQ_ID.nm], msg[f.RESULT.nm][f.IDENTIFIER.nm])
for msg in getRepliesFromClientInbox(receivedMsgs, reqId)]
groupedMsgs = {}
for tpl in msgs:
groupedMsgs[tpl] = groupedMsgs.get(tpl, 0) + 1
assert max(groupedMsgs.values()) >= fValue + 1
def getRepliesFromClientInbox(inbox, reqId) -> list:
return list({_: msg for msg, _ in inbox if
msg[OP_FIELD_NAME] == REPLY and msg[f.RESULT.nm]
[f.REQ_ID.nm] == reqId}.values())
def checkLastClientReqForNode(node: TestNode, expectedRequest: Request):
recvRequest = getLastClientReqReceivedForNode(node)
assert recvRequest
assert expectedRequest.as_dict == recvRequest.as_dict
def assertLength(collection: Iterable[Any], expectedLength: int):
assert len(
collection) == expectedLength, "Observed length was {} but " \
"expected length was {}". \
format(len(collection), expectedLength)
def assertEquality(observed: Any, expected: Any, details=None):
assert observed == expected, "Observed value was {} but expected value " \
"was {}, details: {}".format(observed, expected, details)
def randomOperation():
return {
"type": BUY,
"amount": random.randint(10, 100000)
}
def random_requests(count):
return [randomOperation() for _ in range(count)]
def random_request_objects(count, protocol_version):
req_dicts = random_requests(count)
return [Request(operation=op, protocolVersion=protocol_version) for op in req_dicts]
def buildCompletedTxnFromReply(request, reply: Reply) -> Dict:
txn = request.operation
txn.update(reply)
return txn
async def msgAll(nodes):
for p in permutations(nodes, 2):
await sendMessageAndCheckDelivery(p[0], p[1])
def sendMessage(sender: Node,
reciever: Node,
msg: Optional[Tuple] = None):
logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
msg = msg if msg else randomMsg()
rid = sender.nodestack.getRemote(reciever.name).uid
sender.nodestack.send(msg, rid)
async def sendMessageAndCheckDelivery(sender: Node,
reciever: Node,
msg: Optional[Tuple] = None,
method=None,
customTimeout=None):
logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
msg = msg if msg else randomMsg()
rid = sender.nodestack.getRemote(reciever.name).uid
sender.nodestack.send(msg, rid)
timeout = customTimeout or waits.expectedNodeToNodeMessageDeliveryTime()
await eventually(checkMessageReceived, msg, reciever, method,
retryWait=.1,
timeout=timeout,
ratchetSteps=10)
def sendMessageToAll(nodes,
sender: Node,
msg: Optional[Tuple] = None):
for node in nodes:
if node != sender:
sendMessage(sender, node, msg)
async def sendMessageAndCheckDeliveryToAll(nodes,
sender: Node,
msg: Optional[Tuple] = None,
method=None,
customTimeout=None):
customTimeout = customTimeout or waits.expectedNodeToAllNodesMessageDeliveryTime(
len(nodes))
for node in nodes:
if node != sender:
await sendMessageAndCheckDelivery(sender, node, msg, method, customTimeout)
break
def checkMessageReceived(msg, receiver, method: str = None):
allMsgs = getAllMsgReceivedForNode(receiver, method)
assert msg in allMsgs
def addNodeBack(node_set,
looper: Looper,
node: Node,
tconf,
tdir) -> TestNode:
config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
restartedNode = TestNode(node.name,
config_helper=config_helper,
config=tconf,
ha=node.nodestack.ha,
cliha=node.clientstack.ha)
node_set.append(restartedNode)
looper.add(restartedNode)
return restartedNode
def checkPropagateReqCountOfNode(node: TestNode, digest: str):
assert digest in node.requests
assert node.quorums.propagate.is_reached(
len(node.requests[digest].propagates))
def requestReturnedToNode(node: TestNode, key: str,
instId: int):
params = getAllArgs(node, node.processOrdered)
recvdOrderedReqs = [
(p['ordered'].instId, p['ordered'].valid_reqIdr[0]) for p in params]
expected = (instId, key)
return expected in recvdOrderedReqs
def checkRequestReturnedToNode(node: TestNode, key: str,
instId: int):
assert requestReturnedToNode(node, key, instId)
def checkRequestNotReturnedToNode(node: TestNode, key: str,
instId: int):
assert not requestReturnedToNode(node, key, instId)
def check_request_is_not_returned_to_nodes(txnPoolNodeSet, request):
instances = range(getNoInstances(len(txnPoolNodeSet)))
for node, inst_id in itertools.product(txnPoolNodeSet, instances):
checkRequestNotReturnedToNode(node,
request.key,
inst_id)
def checkPrePrepareReqSent(replica: TestReplica, req: Request):
prePreparesSent = getAllArgs(replica._ordering_service,
replica._ordering_service.send_pre_prepare)
assert (req.digest,) in \
[p["ppReq"].reqIdr for p in prePreparesSent]
def checkPrePrepareReqRecvd(replicas: Iterable[TestReplica],
expectedRequest: PrePrepare):
for replica in replicas:
params = getAllArgs(replica._ordering_service, replica._ordering_service._can_process_pre_prepare)
assert expectedRequest.reqIdr in [p['pre_prepare'].reqIdr for p in params]
def checkPrepareReqSent(replica: TestReplica, key: str,
view_no: int):
paramsList = getAllArgs(replica._ordering_service, replica._ordering_service._can_prepare)
rv = getAllReturnVals(replica._ordering_service,
replica._ordering_service._can_prepare)
args = [p["ppReq"].reqIdr for p in paramsList if p["ppReq"].viewNo == view_no]
assert (key,) in args
idx = args.index((key,))
assert rv[idx]
def checkSufficientPrepareReqRecvd(replica: TestReplica, viewNo: int,
ppSeqNo: int):
key = (viewNo, ppSeqNo)
assert key in replica._ordering_service.prepares
assert len(replica._ordering_service.prepares[key][1]) >= replica.quorums.prepare.value
def checkSufficientCommitReqRecvd(replicas: Iterable[TestReplica], viewNo: int,
ppSeqNo: int):
for replica in replicas:
key = (viewNo, ppSeqNo)
assert key in replica._ordering_service.commits
received = len(replica._ordering_service.commits[key][1])
minimum = replica.quorums.commit.value
assert received > minimum
def checkViewNoForNodes(nodes: Iterable[TestNode], expectedViewNo: int = None):
viewNos = set()
for node in nodes:
logger.debug("{}'s view no is {}".format(node, node.master_replica.viewNo))
viewNos.add(node.master_replica.viewNo)
assert len(viewNos) == 1, 'Expected 1, but got {}. ' \
'ViewNos: {}'.format(len(viewNos), [(n.name, n.master_replica.viewNo) for n in nodes])
vNo, = viewNos
if expectedViewNo is not None:
assert vNo >= expectedViewNo, \
'Expected at least {}, but got {}'.format(expectedViewNo, vNo)
return vNo
def waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=None,
customTimeout=None):
timeout = customTimeout or waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
return looper.run(eventually(checkViewNoForNodes,
txnPoolNodeSet,
expectedViewNo,
timeout=timeout))
def getNodeSuspicions(node: TestNode, code: int = None):
params = getAllArgs(node, TestNode.reportSuspiciousNode)
if params and code is not None:
params = [param for param in params
if 'code' in param and param['code'] == code]
return params
def checkDiscardMsg(processors, discardedMsg,
reasonRegexp, *exclude):
if not exclude:
exclude = []
for p in filterNodeSet(processors, exclude):
last = p.spylog.getLastParams(p.discard, required=False)
assert last
assert last['msg'] == discardedMsg
assert reasonRegexp in last['reason']
def checkMasterReplicaDiscardMsg(processors, discardedMsg,
reasonRegexp, *exclude):
if not exclude:
exclude = []
for p in filterNodeSet(processors, exclude):
stasher = p.master_replica.stasher
last = stasher.spylog.getLastParams(stasher.discard, required=False)
assert last
assert last['msg'] == discardedMsg
assert reasonRegexp in last['reason']
def countDiscarded(processor, reasonPat):
c = 0
for entry in processor.spylog.getAll(processor.discard):
if 'reason' in entry.params and (
(isinstance(
entry.params['reason'],
str) and reasonPat in entry.params['reason']),
(reasonPat in str(
entry.params['reason']))):
c += 1
return c
def filterNodeSet(nodeSet, exclude: List[Union[str, Node]]):
return [n for n in nodeSet
if n not in
[nodeSet[x] if isinstance(x, str) else x for x in exclude]]
def whitelistNode(toWhitelist: str, frm: Sequence[TestNode], *codes):
for node in frm:
node.whitelistNode(toWhitelist, *codes)
def whitelistClient(toWhitelist: str, frm: Sequence[TestNode], *codes):
for node in frm:
node.whitelistClient(toWhitelist, *codes)
def assertExp(condition):
assert condition
def assert_eq(actual, expected):
assert actual == expected
def assert_in(value, collection):
assert value in collection
def assertFunc(func):
assert func()
def checkLedgerEquality(ledger1, ledger2):
assertLength(ledger1, ledger2.size)
assertEquality(ledger1.root_hash, ledger2.root_hash)
assertEquality(ledger1.uncommitted_root_hash, ledger2.uncommitted_root_hash)
def checkAllLedgersEqual(*ledgers):
for l1, l2 in combinations(ledgers, 2):
checkLedgerEquality(l1, l2)
def checkStateEquality(state1, state2):
if state1 is None:
return state2 is None
assertEquality(state1.as_dict, state2.as_dict)
assertEquality(state1.committedHeadHash, state2.committedHeadHash)
assertEquality(state1.committedHead, state2.committedHead)
def check_seqno_db_equality(db1, db2):
if db1._keyValueStorage._db is None or db2._keyValueStorage._db is None:
return False
assert db1.size == db2.size, \
"{} != {}".format(db1.size, db2.size)
assert {bytes(k): bytes(v) for k, v in db1._keyValueStorage.iterator()} == \
{bytes(k): bytes(v) for k, v in db2._keyValueStorage.iterator()}
def check_primaries_equality(node1, node2):
assert node1.primaries == node2.primaries, \
"{} != {}, Node1: {}; Node2: {}".format(node1.primaries, node2.primaries, node1, node2)
def check_last_ordered_3pc(node1, node2):
master_replica_1 = node1.master_replica
master_replica_2 = node2.master_replica
assert master_replica_1.last_ordered_3pc == master_replica_2.last_ordered_3pc, \
"{} != {} Node1: {}, Node2: {}".format(master_replica_1.last_ordered_3pc,
master_replica_2.last_ordered_3pc,
node1, node2)
return master_replica_1.last_ordered_3pc
def check_last_ordered_3pc_backup(node1, node2):
assert len(node1.replicas) == len(node2.replicas)
for i in range(1, len(node1.replicas)):
replica1 = node1.replicas[i]
replica2 = node2.replicas[i]
assert replica1.last_ordered_3pc == replica2.last_ordered_3pc, \
"{}: {} != {}: {}".format(replica1, replica1.last_ordered_3pc,
replica2, replica2.last_ordered_3pc)
def check_view_no(node1, node2):
assert node1.master_replica.viewNo == node2.master_replica.viewNo, \
"{} != {}".format(node1.master_replica.viewNo, node2.master_replica.viewNo)
def check_last_ordered_3pc_on_all_replicas(nodes, last_ordered_3pc):
for n in nodes:
for r in n.replicas.values():
assert r.last_ordered_3pc == last_ordered_3pc, \
"{} != {}, Replica: {}".format(r.last_ordered_3pc,
last_ordered_3pc, r)
def check_last_ordered_3pc_on_master(nodes, last_ordered_3pc):
for n in nodes:
assert n.master_replica.last_ordered_3pc == last_ordered_3pc, \
"{} != {}".format(n.master_replica.last_ordered_3pc,
last_ordered_3pc)
def check_last_ordered_3pc_on_backup(nodes, last_ordered_3pc):
for n in nodes:
for i, r in n.replicas.items():
if i != 0:
assert r.last_ordered_3pc == last_ordered_3pc, \
"{} != {}".format(r.last_ordered_3pc,
last_ordered_3pc)
def randomText(size):
return ''.join(random.choice(string.ascii_letters) for _ in range(size))
def mockGetInstalledDistributions(packages):
ret = []
for pkg in packages:
obj = type('', (), {})()
obj.key = pkg
ret.append(obj)
return ret
def mockImportModule(moduleName):
obj = type(moduleName, (), {})()
obj.send_message = lambda *args: None
return obj
def initDirWithGenesisTxns(
dirName,
tconf,
tdirWithPoolTxns=None,
tdirWithDomainTxns=None,
new_pool_txn_file=None,
new_domain_txn_file=None):
os.makedirs(dirName, exist_ok=True)
if tdirWithPoolTxns:
new_pool_txn_file = new_pool_txn_file or tconf.poolTransactionsFile
copyfile(
os.path.join(
tdirWithPoolTxns, genesis_txn_file(
tconf.poolTransactionsFile)), os.path.join(
dirName, genesis_txn_file(new_pool_txn_file)))
if tdirWithDomainTxns:
new_domain_txn_file = new_domain_txn_file or tconf.domainTransactionsFile
copyfile(
os.path.join(
tdirWithDomainTxns, genesis_txn_file(
tconf.domainTransactionsFile)), os.path.join(
dirName, genesis_txn_file(new_domain_txn_file)))
def stopNodes(nodes: List[TestNode], looper=None, ensurePortsFreedUp=True):
if ensurePortsFreedUp:
assert looper, 'Need a looper to make sure ports are freed up'
for node in nodes:
node.stop()
if ensurePortsFreedUp:
ports = [[n.nodestack.ha[1], n.clientstack.ha[1]] for n in nodes]
waitUntilPortIsAvailable(looper, ports)
def waitUntilPortIsAvailable(looper, ports, timeout=5):
ports = itertools.chain(*ports)
def chk():
for port in ports:
checkPortAvailable(("", port))
looper.run(eventually(chk, retryWait=.5, timeout=timeout))
def run_script(script, *args):
s = os.path.join(os.path.dirname(__file__), '../../scripts/' + script)
command = [executable, s]
command.extend(args)
with Popen([executable, s]) as p:
sleep(4)
p.send_signal(SIGINT)
p.wait(timeout=1)
assert p.poll() == 0, 'script failed'
def viewNoForNodes(nodes):
viewNos = {node.viewNo for node in nodes}
assert 1 == len(viewNos)
return next(iter(viewNos))
def primaryNodeNameForInstance(nodes, instanceId):
primaryNames = {node.replicas[instanceId].primaryName for node in nodes}
assert 1 == len(primaryNames)
primaryReplicaName = next(iter(primaryNames))
return primaryReplicaName[:-2]
def nodeByName(nodes, name):
for node in nodes:
if node.name == name:
return node
raise Exception("Node with the name '{}' has not been found.".format(name))
def send_pre_prepare(view_no, pp_seq_no, nodes,
state_root=None, txn_root=None):
pre_prepare = PrePrepare(
0,
view_no,
pp_seq_no,
get_utc_epoch(),
["requests digest"],
0,
"random digest",
DOMAIN_LEDGER_ID,
state_root or '0' * 44,
txn_root or '0' * 44,
0,
True
)
primary_node = getPrimaryReplica(nodes).node
non_primary_nodes = set(nodes) - {primary_node}
sendMessageToAll(nodes, primary_node, pre_prepare)
for non_primary_node in non_primary_nodes:
sendMessageToAll(nodes, non_primary_node, pre_prepare)
def send_prepare(view_no, pp_seq_no, nodes, state_root=None, txn_root=None):
prepare = Prepare(
0,
view_no,
pp_seq_no,
get_utc_epoch(),
"random digest",
state_root or '0' * 44,
txn_root or '0' * 44
)
primary_node = getPrimaryReplica(nodes).node
sendMessageToAll(nodes, primary_node, prepare)
def send_commit(view_no, pp_seq_no, nodes):
commit = Commit(
0,
view_no,
pp_seq_no)
primary_node = getPrimaryReplica(nodes).node
sendMessageToAll(nodes, primary_node, commit)
def get_key_from_req(req: dict):
return Request(identifier=req[f.IDENTIFIER.nm],
reqId=req[f.REQ_ID.nm],
operation=req[OPERATION],
protocolVersion=req[f.PROTOCOL_VERSION.nm],
signature=req.get(f.SIG.nm),
taaAcceptance=req.get(f.TAA_ACCEPTANCE)
).key
def chk_all_funcs(looper, funcs, acceptable_fails=0, retry_wait=None,
timeout=None, override_eventually_timeout=False):
# TODO: Move this logic to eventuallyAll
def chk():
fails = 0
last_ex = None
for func in funcs:
try:
func()
except Exception as ex:
fails += 1
if fails >= acceptable_fails:
logger.debug('Too many fails, the last one: {}'.format(repr(ex)))
last_ex = ex
assert fails <= acceptable_fails, '{} out of {} failed. Last exception:' \
' {}'.format(fails, len(funcs), last_ex)
kwargs = {}
if retry_wait:
kwargs['retryWait'] = retry_wait
if timeout:
kwargs['timeout'] = timeout
if override_eventually_timeout:
kwargs['override_timeout_limit'] = override_eventually_timeout
looper.run(eventually(chk, **kwargs))
def check_request_ordered(node, request: Request):
# it's ok to iterate through all txns since this is a test
for seq_no, txn in node.domainLedger.getAllTxn():
if get_req_id(txn) is None:
continue
if get_from(txn) is None:
continue
if get_req_id(txn) != request.reqId:
continue
if get_from(txn) != request.identifier:
continue
return True
raise ValueError('{} request not ordered by node {}'.format(request, node.name))
def wait_for_requests_ordered(looper, nodes, requests):
node_count = len(nodes)
timeout_per_request = waits.expectedTransactionExecutionTime(node_count)
total_timeout = (1 + len(requests) / 10) * timeout_per_request
coros = [partial(check_request_ordered,
node,
request)
for (node, request) in list(itertools.product(nodes, requests))]
looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=total_timeout))
def create_new_test_node(test_node_class, node_config_helper_class, name, conf,
tdir, plugin_paths, bootstrap_cls=None,
node_ha=None, client_ha=None):
config_helper = node_config_helper_class(name, conf, chroot=tdir)
return test_node_class(name,
config_helper=config_helper,
config=conf,
pluginPaths=plugin_paths,
ha=node_ha,
cliha=client_ha,
bootstrap_cls=bootstrap_cls)
def sdk_gen_request(operation, protocol_version=CURRENT_PROTOCOL_VERSION,
                    identifier=None, **kwargs):
return Request(operation=operation, reqId=random.randint(10, 1000000000),
protocolVersion=protocol_version, identifier=identifier,
**kwargs)
def sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did):
_, new_steward_did = sdk_wallet_new_steward
node_ip = '{}.{}.{}.{}'.format(
random.randint(1, 240),
random.randint(1, 240),
random.randint(1, 240),
random.randint(1, 240))
data = {
'alias': node_alias,
'client_port': 50001,
'node_port': 50002,
'node_ip': node_ip,
'client_ip': node_ip,
'services': []
}
req = looper.loop.run_until_complete(
build_node_request(new_steward_did, node_did, json.dumps(data)))
return Request(**json.loads(req))
def sdk_random_request_objects(count, protocol_version, identifier=None,
**kwargs):
ops = random_requests(count)
return [sdk_gen_request(op, protocol_version=protocol_version,
identifier=identifier, **kwargs) for op in ops]
def sdk_sign_request_objects(looper, sdk_wallet, reqs: Sequence):
wallet_h, did = sdk_wallet
reqs_str = [json.dumps(req.as_dict) for req in reqs]
reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs
def sdk_multi_sign_request_objects(looper, sdk_wallets, reqs: Sequence):
reqs_str = [json.dumps(req.as_dict) for req in reqs]
for sdk_wallet in sdk_wallets:
wallet_h, did = sdk_wallet
reqs_str = [looper.loop.run_until_complete(multi_sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs_str
def sdk_sign_request_strings(looper, sdk_wallet, reqs: Sequence):
wallet_h, did = sdk_wallet
reqs_str = [json.dumps(req) for req in reqs]
reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs
def sdk_multisign_request_object(looper, sdk_wallet, req):
wh, did = sdk_wallet
return looper.loop.run_until_complete(multi_sign_request(wh, did, req))
def sdk_multisign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None):
wh, did = sdk_wallet
reqId = reqId or random.randint(10, 100000)
request = Request(operation=op, reqId=reqId,
protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did,
taaAcceptance=taa_acceptance,
endorser=endorser)
req_str = json.dumps(request.as_dict)
resp = looper.loop.run_until_complete(multi_sign_request(wh, did, req_str))
return json.loads(resp)
def sdk_signed_random_requests(looper, sdk_wallet, count):
_, did = sdk_wallet
reqs_obj = sdk_random_request_objects(count, identifier=did,
protocol_version=CURRENT_PROTOCOL_VERSION)
return sdk_sign_request_objects(looper, sdk_wallet, reqs_obj)
def sdk_send_signed_requests(pool_h, signed_reqs: Sequence):
return [(json.loads(req),
asyncio.ensure_future(submit_request(pool_h, req)))
for req in signed_reqs]
def sdk_send_random_requests(looper, pool_h, sdk_wallet, count: int):
reqs = sdk_signed_random_requests(looper, sdk_wallet, count)
return sdk_send_signed_requests(pool_h, reqs)
def sdk_send_random_request(looper, pool_h, sdk_wallet):
rets = sdk_send_random_requests(looper, pool_h, sdk_wallet, 1)
return rets[0]
def sdk_send_random_pool_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
node_alias = random_string(7)
node_did = SimpleSigner(seed=random_string(32).encode()).identifier
reqs = [sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did) for _ in range(count)]
return [sdk_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req) for req in reqs]
def sdk_send_random_pool_and_domain_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
node_alias = random_string(7)
node_did = SimpleSigner(seed=random_string(32).encode()).identifier
req_gens = [
lambda: sdk_gen_request(random_requests(1)[0], identifier=sdk_wallet_new_steward[1]),
lambda: sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did),
]
res = []
for i in range(count):
req = req_gens[i % len(req_gens)]()
res.append(sdk_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req))
looper.runFor(0.1)
return res
def sdk_sign_and_submit_req(pool_handle, sdk_wallet, req):
wallet_handle, sender_did = sdk_wallet
return json.loads(req), asyncio.ensure_future(
sign_and_submit_request(pool_handle, wallet_handle, sender_did, req))
def sdk_sign_and_submit_req_obj(looper, pool_handle, sdk_wallet, req_obj):
s_req = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
return sdk_send_signed_requests(pool_handle, [s_req])[0]
def sdk_sign_and_submit_op(looper, pool_handle, sdk_wallet, op):
_, did = sdk_wallet
req_obj = sdk_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION,
identifier=did)
s_req = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
return sdk_send_signed_requests(pool_handle, [s_req])[0]
def sdk_get_reply(looper, sdk_req_resp, timeout=None):
req_json, resp_task = sdk_req_resp
if timeout is None:
timeout = waits.expectedTransactionExecutionTime(7)
try:
resp = looper.run(asyncio.wait_for(resp_task, timeout=timeout))
resp = json.loads(resp)
except IndyError as e:
resp = e.error_code
except TimeoutError as e:
resp = ErrorCode.PoolLedgerTimeout
return req_json, resp
# TODO: Check places where sdk_get_replies is used without sdk_check_reply;
# we need to be sure that the test behaviour doesn't need to check response validity
def sdk_get_replies(looper, sdk_req_resp: Sequence, timeout=None):
resp_tasks = [resp for _, resp in sdk_req_resp]
# TODO: change timeout evaluating logic, when sdk will can tuning timeout from outside
if timeout is None:
timeout = waits.expectedTransactionExecutionTime(7)
def get_res(task, done_list):
if task in done_list:
try:
resp = json.loads(task.result())
except IndyError as e:
resp = e.error_code
else:
resp = ErrorCode.PoolLedgerTimeout
return resp
done, pending = looper.run(asyncio.wait(resp_tasks, timeout=timeout))
if pending:
for task in pending:
task.cancel()
ret = [(req, get_res(resp, done)) for req, resp in sdk_req_resp]
return ret
def sdk_check_reply(req_res):
req, res = req_res
if isinstance(res, ErrorCode):
if res == ErrorCode.PoolLedgerTimeout:
raise PoolLedgerTimeoutException('Got PoolLedgerTimeout for request {}'
.format(req))
else:
raise CommonSdkIOException('Got an error with code {} for request {}'
.format(res, req))
if not isinstance(res, dict):
raise CommonSdkIOException("Unexpected response format {}".format(res))
def _parse_op(res_dict):
if res_dict['op'] == REQNACK:
raise RequestNackedException('ReqNack of id {}. Reason: {}'
.format(req['reqId'], res_dict['reason']))
if res_dict['op'] == REJECT:
raise RequestRejectedException('Reject of id {}. Reason: {}'
.format(req['reqId'], res_dict['reason']))
if 'op' in res:
_parse_op(res)
else:
for resps in res.values():
if isinstance(resps, str):
_parse_op(json.loads(resps))
elif isinstance(resps, dict):
_parse_op(resps)
else:
raise CommonSdkIOException("Unexpected response format {}".format(res))
def sdk_get_and_check_replies(looper, sdk_req_resp: Sequence, timeout=None):
rets = []
for req_res in sdk_get_replies(looper, sdk_req_resp, timeout):
sdk_check_reply(req_res)
rets.append(req_res)
return rets
def sdk_eval_timeout(req_count: int, node_count: int,
customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0):
timeout_per_request = customTimeoutPerReq or waits.expectedTransactionExecutionTime(node_count)
timeout_per_request += add_delay_to_timeout
# here we try to take into account what timeout for execution
# N request - total_timeout should be in
# timeout_per_request < total_timeout < timeout_per_request * N
# we cannot just take (timeout_per_request * N) because it is so huge.
# (for timeout_per_request=5 and N=10, total_timeout=50sec)
# lets start with some simple formula:
return (1 + req_count / 10) * timeout_per_request
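# Worked example (added for clarity): with node_count=4 the per-request timeout is
# waits.expectedTransactionExecutionTime(4) -- call it T. For req_count=20 the
# total becomes (1 + 20 / 10) * T = 3 * T, much smaller than the naive 20 * T.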
def sdk_send_and_check(signed_reqs, looper, txnPoolNodeSet, pool_h, timeout=None):
if not timeout:
timeout = sdk_eval_timeout(len(signed_reqs), len(txnPoolNodeSet))
results = sdk_send_signed_requests(pool_h, signed_reqs)
sdk_replies = sdk_get_replies(looper, results, timeout=timeout)
for req_res in sdk_replies:
sdk_check_reply(req_res)
return sdk_replies
def sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, count,
customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0,
override_timeout_limit=False, total_timeout=None):
sdk_reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, count)
if not total_timeout:
total_timeout = sdk_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet),
customTimeoutPerReq=customTimeoutPerReq,
add_delay_to_timeout=add_delay_to_timeout)
sdk_replies = sdk_get_replies(looper, sdk_reqs, timeout=total_timeout)
for req_res in sdk_replies:
sdk_check_reply(req_res)
return sdk_replies
def sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
num_reqs, num_batches=1, **kwargs):
# This method assumes that `num_reqs` <= num_batches*MaxbatchSize
if num_reqs < num_batches:
raise BaseException(
'sdk_send_batches_of_random_and_check method assumes that `num_reqs` <= num_batches*MaxbatchSize')
if num_batches == 1:
return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, num_reqs, **kwargs)
reqs_in_batch = num_reqs // num_batches
reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches
sdk_replies = []
for _ in range(num_batches - 1):
sdk_replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool, sdk_wallet,
reqs_in_batch, **kwargs))
sdk_replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool, sdk_wallet,
reqs_in_last_batch, **kwargs))
return sdk_replies
def sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
num_reqs, num_batches=1, timeout=Max3PCBatchWait):
if num_reqs < num_batches:
raise BaseException(
'sdk_send_batches_of_random_and_check method assumes that `num_reqs` <= num_batches*MaxbatchSize')
if num_batches == 1:
sdk_reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, num_reqs)
looper.runFor(timeout)
return sdk_reqs
reqs_in_batch = num_reqs // num_batches
reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches
sdk_reqs = []
for _ in range(num_batches - 1):
sdk_reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_batch))
looper.runFor(timeout)
sdk_reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_last_batch))
looper.runFor(timeout)
return sdk_reqs
def sdk_sign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None):
wallet_h, did = sdk_wallet
reqId = reqId or random.randint(10, 100000)
request = Request(operation=op, reqId=reqId,
protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did,
taaAcceptance=taa_acceptance,
endorser=endorser)
req_str = json.dumps(request.as_dict)
resp = looper.loop.run_until_complete(sign_request(wallet_h, did, req_str))
return json.loads(resp)
def sdk_check_request_is_not_returned_to_nodes(looper, nodeSet, request):
instances = range(getNoInstances(len(nodeSet)))
coros = []
for node, inst_id in itertools.product(nodeSet, instances):
c = partial(checkRequestNotReturnedToNode,
node=node,
identifier=request['identifier'],
reqId=request['reqId'],
instId=inst_id
)
coros.append(c)
timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=timeout))
def sdk_json_to_request_object(json_req):
return Request(identifier=json_req.get('identifier', None),
reqId=json_req['reqId'],
operation=json_req['operation'],
signature=json_req['signature'] if 'signature' in json_req else None,
protocolVersion=json_req['protocolVersion'] if 'protocolVersion' in json_req else None,
taaAcceptance=json_req.get('taaAcceptance', None))
def sdk_json_couples_to_request_list(json_couples):
req_list = []
for json_couple in json_couples:
req_list.append(sdk_json_to_request_object(json_couple[0]))
return req_list
def sdk_get_bad_response(looper, reqs, exception, message):
with pytest.raises(exception) as e:
sdk_get_and_check_replies(looper, reqs)
assert message in e._excinfo[1].args[0]
def sdk_set_protocol_version(looper, version=CURRENT_PROTOCOL_VERSION):
looper.loop.run_until_complete(set_protocol_version(version))
# Context managers to be used with tconf fixture
@contextmanager
def perf_monitor_disabled(tconf):
old_unsafe = tconf.unsafe.copy()
tconf.unsafe.add("disable_view_change")
yield tconf
tconf.unsafe = old_unsafe
@contextmanager
def view_change_timeout(tconf, vc_timeout, propose_timeout=None):
old_view_change_timeout = tconf.NEW_VIEW_TIMEOUT
old_propose_timeout = tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT
old_propagate_request_delay = tconf.PROPAGATE_REQUEST_DELAY
tconf.NEW_VIEW_TIMEOUT = vc_timeout
tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = vc_timeout if propose_timeout is None else propose_timeout
tconf.PROPAGATE_REQUEST_DELAY = 0
yield tconf
tconf.NEW_VIEW_TIMEOUT = old_view_change_timeout
tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = old_propose_timeout
tconf.PROPAGATE_REQUEST_DELAY = old_propagate_request_delay
@contextmanager
def max_3pc_batch_limits(tconf, size, wait=10000):
old_size = tconf.Max3PCBatchSize
old_wait = tconf.Max3PCBatchWait
tconf.Max3PCBatchSize = size
tconf.Max3PCBatchWait = wait
yield tconf
tconf.Max3PCBatchSize = old_size
tconf.Max3PCBatchWait = old_wait
@contextmanager
def freshness(tconf, enabled, timeout):
old_update_state = tconf.UPDATE_STATE_FRESHNESS
old_timeout = tconf.STATE_FRESHNESS_UPDATE_INTERVAL
tconf.UPDATE_STATE_FRESHNESS = enabled
tconf.STATE_FRESHNESS_UPDATE_INTERVAL = timeout
yield tconf
tconf.UPDATE_STATE_FRESHNESS = old_update_state
tconf.STATE_FRESHNESS_UPDATE_INTERVAL = old_timeout
@contextmanager
def primary_disconnection_time(tconf, value):
old_tolarate_disconnection = tconf.ToleratePrimaryDisconnection
tconf.ToleratePrimaryDisconnection = value
yield tconf
tconf.ToleratePrimaryDisconnection = old_tolarate_disconnection
@contextmanager
def acc_monitor(tconf, acc_monitor_enabled=True, acc_monitor_timeout=3, acc_monitor_delta=0):
old_timeout = tconf.ACC_MONITOR_TIMEOUT
old_delta = tconf.ACC_MONITOR_TXN_DELTA_K
old_acc_monitor_enabled = tconf.ACC_MONITOR_ENABLED
tconf.ACC_MONITOR_TIMEOUT = acc_monitor_timeout
tconf.ACC_MONITOR_TXN_DELTA_K = acc_monitor_delta
tconf.ACC_MONITOR_ENABLED = acc_monitor_enabled
yield tconf
tconf.ACC_MONITOR_TIMEOUT = old_timeout
tconf.ACC_MONITOR_TXN_DELTA_K = old_delta
tconf.ACC_MONITOR_ENABLED = old_acc_monitor_enabled
def create_pre_prepare_params(state_root,
ledger_id=DOMAIN_LEDGER_ID,
txn_root=None,
timestamp=None,
bls_multi_sig=None,
view_no=0,
pool_state_root=None,
pp_seq_no=0,
inst_id=0,
audit_txn_root=None,
reqs=None,
bls_multi_sigs=None):
if timestamp is None:
timestamp = get_utc_epoch()
req_idrs = [req.key for req in reqs] if reqs is not None else [random_string(32)]
digest = OrderingService.generate_pp_digest(req_idrs, view_no, timestamp)
params = [inst_id,
view_no,
pp_seq_no,
timestamp,
req_idrs,
init_discarded(0),
digest,
ledger_id,
state_root,
txn_root or '1' * 32,
0,
True,
pool_state_root or generate_state_root(),
audit_txn_root or generate_state_root()]
if bls_multi_sig:
# Pass None for backward compatibility
params.append(None)
params.append([bls_multi_sig.as_list()])
elif bls_multi_sigs is not None:
# Pass None for backward compatibility
params.append(None)
params.append([sig.as_list() for sig in bls_multi_sigs])
return params
def create_pre_prepare_no_bls(state_root, view_no=0, pool_state_root=None, pp_seq_no=0, inst_id=0, audit_txn_root=None):
params = create_pre_prepare_params(state_root=state_root,
view_no=view_no,
pool_state_root=pool_state_root,
pp_seq_no=pp_seq_no,
inst_id=inst_id,
audit_txn_root=audit_txn_root)
return PrePrepare(*params)
def create_commit_params(view_no, pp_seq_no, inst_id=0):
return [inst_id, view_no, pp_seq_no]
def create_commit_no_bls_sig(req_key, inst_id=0):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no, inst_id=inst_id)
return Commit(*params)
def create_commit_with_bls_sig(req_key, bls_sig):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
# Use ' ' as BLS_SIG for backward-compatibility as BLS_SIG in COMMIT is optional but not Nullable
params.append(' ')
params.append({DOMAIN_LEDGER_ID: bls_sig})
return Commit(*params)
def create_commit_with_bls_sigs(req_key, bls_sig, lid):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
# Use ' ' as BLS_SIG for backward-compatibility as BLS_SIG in COMMIT is optional but not Nullable
params.append(' ')
params.append({str(lid): bls_sig})
return Commit(*params)
def create_commit_bls_sig(bls_bft, req_key, pre_prepare):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
params = bls_bft.update_commit(params, pre_prepare)
return Commit(*params)
def create_prepare_params(view_no, pp_seq_no, state_root, inst_id=0):
return [inst_id,
view_no,
pp_seq_no,
get_utc_epoch(),
"random digest",
state_root,
'1' * 32]
def create_prepare_from_pre_prepare(pre_prepare):
params = [pre_prepare.instId,
pre_prepare.viewNo,
pre_prepare.ppSeqNo,
pre_prepare.ppTime,
pre_prepare.digest,
pre_prepare.stateRootHash,
pre_prepare.txnRootHash,
pre_prepare.auditTxnRootHash]
return Prepare(*params)
def create_commit_from_pre_prepare(pre_prepare):
params = [pre_prepare.instId,
pre_prepare.viewNo,
pre_prepare.ppSeqNo]
return Commit(*params)
def create_prepare(req_key, state_root, inst_id=0):
view_no, pp_seq_no = req_key
params = create_prepare_params(view_no, pp_seq_no, state_root, inst_id=inst_id)
return Prepare(*params)
def generate_state_root():
return base58.b58encode(os.urandom(32)).decode("utf-8")
def init_discarded(value=None):
discarded = []
if value:
discarded.append(value)
return invalid_index_serializer.serialize(discarded, toBytes=False)
def incoming_3pc_msgs_count(nodes_count: int = 4) -> int:
pre_prepare = 1 # Message from Primary
prepares = nodes_count - 2 # Messages from all nodes exclude primary and self node
commits = nodes_count - 1 # Messages from all nodes exclude self node
    # The primary node receives the same number of messages. Doesn't get pre-prepare,
    # but gets one more prepare
return pre_prepare + prepares + commits
def check_missing_pre_prepares(nodes, count):
assert all(count <= len(replica._ordering_service.prePreparesPendingPrevPP)
for replica in getNonPrimaryReplicas(nodes, instId=0))
class MockTimestamp:
def __init__(self, value=datetime.utcnow()):
self.value = value
def __call__(self):
return self.value
class MockTimer(QueueTimer):
def __init__(self, start_time: int = 0):
self._ts = MockTimestamp(start_time)
QueueTimer.__init__(self, self._ts)
def set_time(self, value):
self._ts.value = value
self._log_time()
self.service()
def sleep(self, seconds):
self.set_time(self._ts.value + seconds)
def advance(self):
if not self._events:
return
event = self._pop_event()
self._ts.value = event.timestamp
self._log_time()
event.callback()
def advance_until(self, value):
while self._events and self._next_timestamp() <= value:
self.advance()
self._ts.value = value
def run_for(self, seconds):
self.advance_until(self._ts.value + seconds)
def wait_for(self, condition: Callable[[], bool], timeout: Optional = None, max_iterations: int = 10000):
counter = 0
deadline = self._ts.value + timeout if timeout else None
while self._events and not condition() and counter < max_iterations:
if deadline and self._next_timestamp() > deadline:
raise TimeoutError("Failed to reach condition in required time, {} iterations passed".format(counter))
self.advance()
counter += 1
if not condition():
if not self._events:
raise TimeoutError("Condition will be never reached, {} iterations passed".format(counter))
else:
raise TimeoutError("Failed to reach condition in {} iterations".format(max_iterations))
def run_to_completion(self, max_iterations: int = 10000):
counter = 0
while self._events and counter < max_iterations:
self.advance()
counter += 1
if self._events:
raise TimeoutError("Failed to complete in {} iterations".format(max_iterations))
def _log_time(self):
logger.info("Virtual time: {}".format(self._ts.value))
class TestStopwatch:
def __init__(self, timer: Optional[TimerService] = None):
self._get_current_time = timer.get_current_time if timer else perf_counter
self._start_time = self._get_current_time()
def start(self):
self._start_time = self._get_current_time()
def has_elapsed(self, expected_delay: float, tolerance: float = 0.1) -> bool:
elapsed = self._get_current_time() - self._start_time
return abs(expected_delay - elapsed) <= expected_delay * tolerance
class TestInternalBus(InternalBus):
def __init__(self):
super().__init__()
self.sent_messages = []
def send(self, message: Any, *args):
self.sent_messages.append(message)
super().send(message, *args)
class MockNetwork(ExternalBus):
def __init__(self):
super().__init__(self._send_message)
self.sent_messages = []
def _send_message(self, msg: Any, dst: ExternalBus.Destination):
self.sent_messages.append((msg, dst))
def connect(self, name: str):
self.update_connecteds(self.connecteds.union({name}))
def disconnect(self, name: str):
self.update_connecteds(self.connecteds.difference({name}))
def get_handler_by_type_wm(write_manager, h_type):
for h_l in write_manager.request_handlers.values():
for h in h_l:
if isinstance(h, h_type):
return h
def create_pool_txn_data(node_names: List[str],
crypto_factory: BlsFactoryCrypto,
get_free_port: Callable[[], int],
nodes_with_bls: Optional[int] = None):
nodeCount = len(node_names)
data = {'txns': [], 'seeds': {}, 'nodesWithBls': {}}
for i, node_name in zip(range(1, nodeCount + 1), node_names):
data['seeds'][node_name] = node_name + '0' * (32 - len(node_name))
steward_name = 'Steward' + str(i)
data['seeds'][steward_name] = steward_name + '0' * (32 - len(steward_name))
n_idr = SimpleSigner(seed=data['seeds'][node_name].encode()).identifier
s_idr = DidSigner(seed=data['seeds'][steward_name].encode())
data['txns'].append(
Member.nym_txn(nym=s_idr.identifier,
verkey=s_idr.verkey,
role=STEWARD,
name=steward_name,
seq_no=i)
)
node_txn = Steward.node_txn(steward_nym=s_idr.identifier,
node_name=node_name,
nym=n_idr,
ip='127.0.0.1',
node_port=get_free_port(),
client_port=get_free_port(),
client_ip='127.0.0.1',
services=[VALIDATOR],
seq_no=i)
if nodes_with_bls is None or i <= nodes_with_bls:
_, bls_key, bls_key_proof = crypto_factory.generate_bls_keys(
seed=data['seeds'][node_name])
get_payload_data(node_txn)[DATA][BLS_KEY] = bls_key
get_payload_data(node_txn)[DATA][BLS_KEY_PROOF] = bls_key_proof
data['nodesWithBls'][node_name] = True
data['txns'].append(node_txn)
for i in range(4):
trustee_name = 'Trs' + str(i)
data['seeds'][trustee_name] = trustee_name + '0' * (
32 - len(trustee_name))
t_sgnr = DidSigner(seed=data['seeds'][trustee_name].encode())
data['txns'].append(
Member.nym_txn(nym=t_sgnr.identifier,
verkey=t_sgnr.verkey,
role=TRUSTEE,
name=trustee_name)
)
more_data_seeds = \
{
"Alice": "99999999999999999999999999999999",
"Jason": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"John": "dddddddddddddddddddddddddddddddd",
"Les": "ffffffffffffffffffffffffffffffff"
}
more_data_users = []
for more_name, more_seed in more_data_seeds.items():
signer = DidSigner(seed=more_seed.encode())
more_data_users.append(
Member.nym_txn(nym=signer.identifier,
verkey=signer.verkey,
name=more_name,
creator="5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC")
)
data['txns'].extend(more_data_users)
data['seeds'].update(more_data_seeds)
return data
def get_pp_seq_no(nodes: list, inst_id=0) -> int:
los = set([n.replicas._replicas[inst_id].last_ordered_3pc[1] for n in nodes])
assert len(los) == 1
return los.pop()
| true
| true
|
7906cfabc618378219dca9026f2fabce212c9b65
| 458
|
py
|
Python
|
data/scripts/templates/object/static/structure/dantooine/shared_dant_boundary_post.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/static/structure/dantooine/shared_dant_boundary_post.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/static/structure/dantooine/shared_dant_boundary_post.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/dantooine/shared_dant_boundary_post.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.941176
| 84
| 0.731441
| true
| true
|
|
7906cfb86de423f9d982ed77fc575a8d59866742
| 8,765
|
py
|
Python
|
Python/Programação_em_Python_Essencial/5- Coleções/listas.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Programação_em_Python_Essencial/5- Coleções/listas.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Programação_em_Python_Essencial/5- Coleções/listas.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
"""
Listas
Listas em Python funcionam como vetores/matrizes (arrays) em outras linguagens, com a diferença
de serem DINÂMICO e também de podermos colocar QUALQUER tipo de dado.
Linguagens C/Java: Arrays
- Possuem tamanho e tipo de dado fixo;
Ou seja, nestas linguagens se você criar um array do tipo int e com tamanho 5, este array
sera SEMPRE do tipo inteiro e poderá ter SEMPRE no máximo 5 valores.
Já em Python:
- Dinâmico: Não possui tamanho fixo; Ou seja, podemos criar a lista e simplesmente ir adicionando elementos;
- Qualquer tipo de dado; Não possuem tipo de dado fixo; Ou seja, podemos colocar qualquer tipo de dado;
As listas são mutáveis!
As listas em Python são representadas por colchetes: []
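# A tiny sketch of those three properties (dynamic, any type, mutable);
# the name 'exemplo' is just illustrative:
exemplo = []             # starts empty
exemplo.append(1)        # grows dynamically
exemplo.append('texto')  # holds any type of data
exemplo[0] = 99          # mutable: elements can be replaced
print(exemplo)           # [99, 'texto']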
type([])
lista1 = [1, 99, 4, 27, 15, 22, 3, 1, 44, 42, 27]
lista2 = ['G', 'e', 'e', 'k', ' ', 'U', 'n', 'i', 'v', 'e', 'r', 's', 'i', 't', 'y']
lista3 = []
lista4 = list(range(11))
lista5 = list('Geek University')
# We can easily check whether a given value is contained in the list
num = 18
if num in lista4:
    print(f'Found the number {num}')
else:
    print(f'Did not find the number {num}')
# We can easily sort a list
print(lista1)
lista1.sort()
print(lista1)
# We can easily count the number of occurrences of a value in a list
print(lista1)
print(lista1.count(1))
print(lista5)
print(lista5.count('e'))
# Adding elements to lists
# To add elements to a list, we use the append method
print(lista1)
lista1.append(42)
print(lista1)
# NOTE: With append, we can only add one (1) element at a time
# lista1.append(12, 14, 56) # Error
lista1.append([8, 3, 1]) # Adds the whole list as a single element (a sublist)
print(lista1)
if [8, 3, 1] in lista1:
    print('Found the list')
else:
    print('Did not find the list')
lista1.extend([123, 44, 67]) # Adds each element of the given list as an extra value in lista1
print(lista1)
# We can insert a new element into the list by specifying the index position
# This does not replace the original value; that value is shifted to the right in the list.
lista1.insert(2, 'Novo Valor')
print(lista1)
# We can easily join two lists
lista1 = lista1 + lista2
# lista1.extend(lista2)
print(lista1)
# We can easily reverse a list
# Option 1
lista1.reverse()
lista2.reverse()
print(lista1)
print(lista2)
# Option 2
print(lista1[::-1])
print(lista2[::-1])
# Copying a list
lista6 = lista2.copy()
print(lista6)
# We can count how many elements are in a list
print(len(lista1))
# We can easily remove the last element of a list
# pop not only removes the last element, it also returns it
print(lista5)
lista5.pop()
print(lista5)
# We can remove an element by its index
# NOTE: The elements to the right of that index are shifted to the left.
# NOTE: If there is no element at the given index, an IndexError is raised
lista5.pop(2)
print(lista5)
# We can remove all elements (empty the list)
print(lista5)
lista5.clear()
print(lista5)
# We can easily repeat the elements of a list
nova = [1, 2, 3]
print(nova)
nova = nova * 3
print(nova)
# We can easily convert a string into a list
# Example 1
curso = 'Programação em Python Essencial'
print(curso)
curso = curso.split()
print(curso)
# NOTE: By default, split separates the elements by the spaces between them.
# Example 2
curso = 'Programação,em,Python, Essencial'
print(curso)
curso = curso.split(',')
print(curso)
# Converting a list into a string
lista6 = ['Programação', 'em', 'Python', 'Essencial']
print(lista6)
# Below we are saying: take lista6, put the chosen separator (a space, then a dollar sign) between each element, and turn it into a string
curso = ' '.join(lista6)
print(curso)
curso = '$'.join(lista6)
print(curso)
# We really can put any type of data in a list, even mixing those types
lista6 = [1, 2.34, True, 'Geek', 'd', [1, 2, 3], 45345345345]
print(lista6)
print(type(lista6))
# Iterating over lists
# Example 1 - Using for
soma = 0
for elemento in lista1:
print(elemento)
soma = soma + elemento
print(soma)
# Example 2 - Using while
carrinho = []
produto = ''
while produto != 'sair':
print("Adicione um produto na lista ou digite 'sair' para sair: ")
produto = input()
if produto != 'sair':
carrinho.append(produto)
for produto in carrinho:
print(produto)
# Using variables in lists
numeros = [1, 2, 3, 4, 5]
print(numeros)
num1 = 1
num2 = 2
num3 = 3
num4 = 4
num5 = 5
numeros = [num1, num2, num3, num4, num5]
print(numeros)
# We access elements by index
cores = ['verde', 'amarelo', 'azul', 'branco']
print(cores[0]) # verde
print(cores[1]) # amarelo
print(cores[2]) # azul
print(cores[3]) # branco
# Accessing elements with negative (reverse) indexing
# To better understand negative indexes, think of the list as a circle, where
# the end of the list is connected back to its beginning
print(cores[-1]) # branco
print(cores[-2]) # azul
print(cores[-3]) # amarelo
print(cores[-4]) # verde
for cor in cores:
print(cor)
indice = 0
while indice < len(cores):
print(cores[indice])
indice = indice + 1
cores = ['verde', 'amarelo', 'azul', 'branco']
# Generating an index inside a for loop
for indice, cor in enumerate(cores):
print(indice, cor)
# Lists accept repeated values
lista = []
lista.append(42)
lista.append(42)
lista.append(33)
lista.append(33)
lista.append(42)
# Other methods, less important but still useful
# Finding the index of an element in the list
numeros = [5, 6, 7, 5, 8, 9, 10]
# At which index of the list is the value 6?
print(numeros.index(6))
# At which index of the list is the value 9?
print(numeros.index(9))
# print(numeros.index(19)) # Raises ValueError
# NOTE: If the element is not in the list, a ValueError is raised
# NOTE: index returns the index of the first matching element found
print(numeros.index(5))
# We can search within a range, i.e. specify the index at which to start searching
print(numeros.index(5, 1)) # Searching from index 1
print(numeros.index(5, 2)) # Searching from index 2
print(numeros.index(5, 3)) # Searching from index 3
# print(numeros.index(5, 4)) # Searching from index 4
# NOTE: If the element is not found in the searched range, a ValueError is raised
# We can also search within a start/end range
print(numeros.index(8, 3, 6)) # Find the index of the value 8 between indexes 3 and 6
# Slicing review
# lista[start:end:step]
# range(start:end:step)
# Slicing lists with the 'start' parameter
lista = [1, 2, 3, 4]
print(lista[1:]) # Starting at index 1 and taking all the remaining elements
# Slicing lists with the 'end' parameter
print(lista[:2]) # Starts at 0, takes up to index 2 - 1
print(lista[:4]) # Starts at 0, takes up to index 4 - 1
print(lista[1:3]) # Starts at 1, takes up to index 3 - 1
# Slicing lists with the 'step' parameter
print(lista[1::2]) # Starts at 1, goes to the end, stepping 2 by 2
print(lista[::2]) # Starts at 0, goes to the end, stepping 2 by 2
# Swapping / reversing values in a list
nomes = ['Geek', 'University']
nomes[0], nomes[1] = nomes[1], nomes[0]
print(nomes)
nomes = ['Geek', 'University']
nomes.reverse()
print(nomes)
# Sum*, Maximum*, Minimum*, Length
# * Only if the values are all integers or floats
lista = [1, 2, 3, 4, 5, 6]
print(sum(lista)) # Sum
print(max(lista)) # Maximum value
print(min(lista)) # Minimum value
print(len(lista)) # Length of the list
# Converting a list into a tuple
lista = [1, 2, 3, 4, 5, 6]
print(lista)
print(type(lista))
tupla = tuple(lista)
print(tupla)
print(type(tupla))
# List unpacking
lista = [1, 2, 3]
num1, num2, num3 = lista
print(num1)
print(num2)
print(num3)
# NOTE: If the number of elements in the list differs from the number of receiving variables, we get a ValueError
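# A quick sketch of that ValueError (wrapped in try/except just to display it):
try:
    a, b = [1, 2, 3]  # three elements, but only two variables
except ValueError as erro:
    print(f'ValueError: {erro}')  # too many values to unpack (expected 2)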
# Copying one list into another (real copies vs. plain assignment)
# Option 1 - list.copy() creates a new, independent (shallow) copy
lista = [1, 2, 3]
print(lista)
nova = lista.copy() # Copy
print(nova)
nova.append(4)
print(lista)
print(nova)
# Note that when we use lista.copy() the data is copied into a new list, and the two
# lists are independent: modifying one list does not affect the other. (Strictly speaking,
# list.copy() is a *shallow* copy; for a flat list of numbers it behaves like a full copy.)
# Option 2 - assignment (an alias, not a copy)
lista = [1, 2, 3]
print(lista)
nova = lista # Alias (same object, not a copy)
print(nova)
nova.append(4)
print(lista)
print(nova)
# Note that here we "copied" via assignment, so nova is just another name (an alias) for
# the very same list object; after modifying it through one name, the change is visible
# through both. No copy is made at all; this is simply what assignment does in Python.
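# For nested lists, list.copy() still shares the inner lists; to copy those too we can
# use copy.deepcopy. A small sketch (the names 'matriz' and 'matriz_nova' are just illustrative):
from copy import deepcopy
matriz = [[1, 2], [3, 4]]
matriz_nova = deepcopy(matriz)  # deep copy: the inner lists are copied as well
matriz_nova[0].append(99)
print(matriz)       # [[1, 2], [3, 4]] - unchanged
print(matriz_nova)  # [[1, 2, 99], [3, 4]]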
"""
| 25.703812
| 116
| 0.712949
| true
| true
|
|
7906cfc167927fc8aa49f63c83d41f2039162b4e
| 9,396
|
py
|
Python
|
src/sciPENN/Network/Model.py
|
jlakkis/sciPENN
|
34afb2008a076e13c40965a76d3dd31d0c331652
|
[
"MIT"
] | 1
|
2022-02-08T02:33:21.000Z
|
2022-02-08T02:33:21.000Z
|
src/sciPENN/Network/Model.py
|
jlakkis/sciPENN
|
34afb2008a076e13c40965a76d3dd31d0c331652
|
[
"MIT"
] | null | null | null |
src/sciPENN/Network/Model.py
|
jlakkis/sciPENN
|
34afb2008a076e13c40965a76d3dd31d0c331652
|
[
"MIT"
] | null | null | null |
from math import log, exp
from numpy import inf, zeros, zeros_like as np_zeros_like, arange, asarray, empty
from pandas import concat
from anndata import AnnData
from torch import cat, no_grad, randn, zeros_like, zeros as torch_zeros, ones, argmax
from torch.nn import Module, Linear, Sequential, RNNCell, Softplus, Parameter, Softmax
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
from .Layers import Input_Block, FF_Block, LambdaLayer, Dual_Forward
class sciPENN_Model(Module):
def __init__(self, p_mod1, p_mod2, loss1, loss2, quantiles, categories):
super(sciPENN_Model, self).__init__()
h_size, drop_rate = 512, 0.25
self.RNNCell = RNNCell(h_size, h_size)
self.input_block = Input_Block(p_mod1, h_size, drop_rate, drop_rate)
self.skip_1 = FF_Block(h_size, drop_rate)
self.skip_2 = FF_Block(h_size, drop_rate)
self.skip_3 = FF_Block(h_size, drop_rate)
MSE_output = Linear(h_size, p_mod2)
if len(quantiles) > 0:
quantile_layer = []
quantile_layer.append(Linear(h_size, p_mod2 * len(quantiles)))
quantile_layer.append(LambdaLayer(lambda x: x.view(-1, p_mod2, len(quantiles))))
quantile_layer = Sequential(*quantile_layer)
self.mod2_out = Dual_Forward(MSE_output, quantile_layer)
else:
self.mod2_out = MSE_output
if categories is not None:
self.celltype_out = Sequential(Linear(h_size, len(categories)), Softmax(1))
self.forward = self.forward_transfer
self.categories_arr = empty((len(categories), ), dtype = 'object')
for cat in categories:
self.categories_arr[categories[cat]] = cat
else:
self.forward = self.forward_simple
self.categories_arr = None
self.quantiles = quantiles
self.loss1, self.loss2 = loss1, loss2
def forward_transfer(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': self.celltype_out(h.detach()), 'modality 2': self.mod2_out(h), 'embedding': h}
def forward_simple(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': None, 'modality 2': self.mod2_out(h), 'embedding': h}
def train_backprop(self, train_loader, val_loader,
n_epoch = 10000, ES_max = 30, decay_max = 10, decay_step = 0.1, lr = 10**(-3)):
optimizer = Adam(self.parameters(), lr = lr)
scheduler = StepLR(optimizer, step_size = 1, gamma = decay_step)
patience = 0
bestloss = inf
if self.categories_arr is None:
get_correct = lambda x: 0
else:
get_correct = lambda outputs: (argmax(outputs['celltypes'], axis = 1) == celltypes).sum()
for epoch in range(n_epoch):
with no_grad():
running_loss, rtype_acc = 0., 0.
self.eval()
for batch, inputs in enumerate(val_loader):
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
n_correct = get_correct(outputs)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
rtype_acc += n_correct
running_loss += mod2_loss.item() * len(mod2)
if self.categories_arr is None:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}")
else:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}, validation accuracy = {rtype_acc/len(val_loader):.3f}")
patience += 1
if bestloss/1.005 > running_loss:
bestloss, patience = running_loss, 0
if (patience + 1) % decay_max == 0:
scheduler.step()
print(f"Decaying loss to {optimizer.param_groups[0]['lr']}")
if (patience + 1) > ES_max:
break
self.train()
for batch, inputs in enumerate(train_loader):
optimizer.zero_grad()
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
mod1_loss = self.loss1(outputs['celltypes'], celltypes)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
loss = mod1_loss + mod2_loss
loss.backward()
optimizer.step()
def impute(self, impute_loader, requested_quantiles, denoise_genes, proteins):
imputed_test = proteins.copy()
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = self.fill_predicted(imputed_test.X[start:end], mod2_impute, bools)
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
return imputed_test
def embed(self, impute_loader, test_loader, cells_train, cells_test):
if cells_test is not None:
embedding = AnnData(zeros(shape = (len(cells_train) + len(cells_test), 512)))
embedding.obs = concat((cells_train, cells_test), join = 'inner')
else:
embedding = AnnData(zeros(shape = (len(cells_train), 512)))
embedding.obs = cells_train
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
if cells_test is not None:
for mod1 in test_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
return embedding
def fill_predicted(self, array, predicted, bools):
bools = bools.cpu().numpy()
return (1. - bools) * predicted.cpu().numpy() + array
def predict(self, test_loader, requested_quantiles, denoise_genes, proteins, cells):
imputed_test = AnnData(zeros(shape = (len(cells), len(proteins.var))))
imputed_test.obs = cells
imputed_test.var.index = proteins.var.index
if self.categories_arr is not None:
celltypes = ['None'] * len(cells)
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1 in test_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if self.categories_arr is not None:
predicted_types = argmax(outputs['celltypes'], axis = 1).cpu().numpy()
celltypes[start:end] = self.categories_arr[predicted_types].tolist()
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = mod2_impute.cpu().numpy()
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
if self.categories_arr is not None:
imputed_test.obs['transfered cell labels'] = celltypes
return imputed_test
| 37.434263
| 151
| 0.540975
|
from math import log, exp
from numpy import inf, zeros, zeros_like as np_zeros_like, arange, asarray, empty
from pandas import concat
from anndata import AnnData
from torch import cat, no_grad, randn, zeros_like, zeros as torch_zeros, ones, argmax
from torch.nn import Module, Linear, Sequential, RNNCell, Softplus, Parameter, Softmax
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
from .Layers import Input_Block, FF_Block, LambdaLayer, Dual_Forward
class sciPENN_Model(Module):
def __init__(self, p_mod1, p_mod2, loss1, loss2, quantiles, categories):
super(sciPENN_Model, self).__init__()
h_size, drop_rate = 512, 0.25
self.RNNCell = RNNCell(h_size, h_size)
self.input_block = Input_Block(p_mod1, h_size, drop_rate, drop_rate)
self.skip_1 = FF_Block(h_size, drop_rate)
self.skip_2 = FF_Block(h_size, drop_rate)
self.skip_3 = FF_Block(h_size, drop_rate)
MSE_output = Linear(h_size, p_mod2)
if len(quantiles) > 0:
quantile_layer = []
quantile_layer.append(Linear(h_size, p_mod2 * len(quantiles)))
quantile_layer.append(LambdaLayer(lambda x: x.view(-1, p_mod2, len(quantiles))))
quantile_layer = Sequential(*quantile_layer)
self.mod2_out = Dual_Forward(MSE_output, quantile_layer)
else:
self.mod2_out = MSE_output
if categories is not None:
self.celltype_out = Sequential(Linear(h_size, len(categories)), Softmax(1))
self.forward = self.forward_transfer
self.categories_arr = empty((len(categories), ), dtype = 'object')
for cat in categories:
self.categories_arr[categories[cat]] = cat
else:
self.forward = self.forward_simple
self.categories_arr = None
self.quantiles = quantiles
self.loss1, self.loss2 = loss1, loss2
def forward_transfer(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': self.celltype_out(h.detach()), 'modality 2': self.mod2_out(h), 'embedding': h}
def forward_simple(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': None, 'modality 2': self.mod2_out(h), 'embedding': h}
def train_backprop(self, train_loader, val_loader,
n_epoch = 10000, ES_max = 30, decay_max = 10, decay_step = 0.1, lr = 10**(-3)):
optimizer = Adam(self.parameters(), lr = lr)
scheduler = StepLR(optimizer, step_size = 1, gamma = decay_step)
patience = 0
bestloss = inf
if self.categories_arr is None:
get_correct = lambda x: 0
else:
get_correct = lambda outputs: (argmax(outputs['celltypes'], axis = 1) == celltypes).sum()
for epoch in range(n_epoch):
with no_grad():
running_loss, rtype_acc = 0., 0.
self.eval()
for batch, inputs in enumerate(val_loader):
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
n_correct = get_correct(outputs)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
rtype_acc += n_correct
running_loss += mod2_loss.item() * len(mod2)
if self.categories_arr is None:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}")
else:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}, validation accuracy = {rtype_acc/len(val_loader):.3f}")
patience += 1
if bestloss/1.005 > running_loss:
bestloss, patience = running_loss, 0
if (patience + 1) % decay_max == 0:
scheduler.step()
print(f"Decaying loss to {optimizer.param_groups[0]['lr']}")
if (patience + 1) > ES_max:
break
self.train()
for batch, inputs in enumerate(train_loader):
optimizer.zero_grad()
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
mod1_loss = self.loss1(outputs['celltypes'], celltypes)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
loss = mod1_loss + mod2_loss
loss.backward()
optimizer.step()
def impute(self, impute_loader, requested_quantiles, denoise_genes, proteins):
imputed_test = proteins.copy()
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = self.fill_predicted(imputed_test.X[start:end], mod2_impute, bools)
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
return imputed_test
def embed(self, impute_loader, test_loader, cells_train, cells_test):
if cells_test is not None:
embedding = AnnData(zeros(shape = (len(cells_train) + len(cells_test), 512)))
embedding.obs = concat((cells_train, cells_test), join = 'inner')
else:
embedding = AnnData(zeros(shape = (len(cells_train), 512)))
embedding.obs = cells_train
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
if cells_test is not None:
for mod1 in test_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
return embedding
def fill_predicted(self, array, predicted, bools):
bools = bools.cpu().numpy()
return (1. - bools) * predicted.cpu().numpy() + array
def predict(self, test_loader, requested_quantiles, denoise_genes, proteins, cells):
imputed_test = AnnData(zeros(shape = (len(cells), len(proteins.var))))
imputed_test.obs = cells
imputed_test.var.index = proteins.var.index
if self.categories_arr is not None:
celltypes = ['None'] * len(cells)
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1 in test_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if self.categories_arr is not None:
predicted_types = argmax(outputs['celltypes'], axis = 1).cpu().numpy()
celltypes[start:end] = self.categories_arr[predicted_types].tolist()
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = mod2_impute.cpu().numpy()
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
if self.categories_arr is not None:
imputed_test.obs['transfered cell labels'] = celltypes
return imputed_test
| true
| true
|
7906d18dd660bab35f5b0a9479284ab617d70090
| 4,037
|
py
|
Python
|
configs/centernext/paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/centernext/paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/centernext/paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
# model settings
model = dict(
type='CenterNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
add_summay_every_n_step=200,
style='pytorch'),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(64, 128, 256, 512),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=1,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
avg_wh_weightv3=True,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.05,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[18, 22])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 30.126866
| 87
| 0.628685
|
model = dict(
type='CenterNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
add_summay_every_n_step=200,
style='pytorch'),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(64, 128, 256, 512),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=1,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
avg_wh_weightv3=True,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.05,
max_per_img=100)
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[18, 22])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
log_config = dict(interval=20)
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| true
| true
|
7906d1ae2c90175bcee045c784344e55fb521f0b
| 14,451
|
py
|
Python
|
tests/test_workflows.py
|
apikay/celery-director
|
3575e9f89690f6f2518c9939be6169fb4383cbed
|
[
"BSD-3-Clause"
] | 351
|
2020-01-30T14:37:48.000Z
|
2022-03-29T11:34:14.000Z
|
tests/test_workflows.py
|
apikay/celery-director
|
3575e9f89690f6f2518c9939be6169fb4383cbed
|
[
"BSD-3-Clause"
] | 53
|
2020-02-14T17:06:48.000Z
|
2022-03-22T14:37:36.000Z
|
tests/test_workflows.py
|
apikay/celery-director
|
3575e9f89690f6f2518c9939be6169fb4383cbed
|
[
"BSD-3-Clause"
] | 33
|
2020-01-31T14:27:21.000Z
|
2022-03-10T19:50:06.000Z
|
import time
import pytest
from celery.result import GroupResult
from celery.schedules import crontab
from kombu.exceptions import EncodeError
from director import build_celery_schedule
from director.exceptions import WorkflowSyntaxError
from director.models.tasks import Task
from director.models.workflows import Workflow
KEYS = ["id", "created", "updated", "task"]
def test_execute_one_task_success(app, create_builder):
workflow, builder = create_builder("example", "WORKFLOW", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 3
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_EXAMPLE"
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 1
assert tasks[0].key == "TASK_EXAMPLE"
assert tasks[0].status.value == "pending"
# Tasks executed in Celery
result = builder.run()
assert result.get() is None
assert result.parent.parent.get() is None
assert result.parent.get() == "task_example"
assert result.parent.state == "SUCCESS"
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task = Task.query.filter_by(id=tasks[0].id).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "success"
assert task.status.value == "success"
def test_execute_one_task_error(app, create_builder):
workflow, builder = create_builder("example", "ERROR", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 3
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_ERROR"
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 1
assert tasks[0].key == "TASK_ERROR"
assert tasks[0].status.value == "pending"
# Tasks executed in Celery
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task = Task.query.filter_by(id=tasks[0].id).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "error"
assert task.status.value == "error"
def test_execute_chain_success(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_CHAIN", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 5
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_C"]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
assert result.get() is None
assert result.parent.parent.parent.parent.get() is None
assert result.parent.get() == "task_c"
assert result.parent.state == "SUCCESS"
assert result.parent.parent.get() == "task_b"
assert result.parent.parent.state == "SUCCESS"
assert result.parent.parent.parent.get() == "task_a"
assert result.parent.parent.parent.state == "SUCCESS"
# DB rows status updated
time.sleep(0.5)
with app.app_context():
tasks = Task.query.filter_by(id=tasks[0].id).all()
workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
assert workflow.status.value == "success"
for task in tasks:
assert task.status.value == "success"
def test_execute_chain_error(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_CHAIN_ERROR", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 5
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_ERROR"]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_ERROR"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_b = Task.query.filter_by(key="TASK_B").first()
task_error = Task.query.filter_by(key="TASK_ERROR").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_b.status.value == "success"
assert task_error.status.value == "error"
assert workflow.status.value == "error"
def test_execute_group_success(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_GROUP", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 4
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_A"
group_tasks = builder.canvas[2].tasks
assert len(group_tasks) == 2
assert [group_tasks[0].task, group_tasks[1].task] == [
"TASK_B",
"TASK_C",
]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
assert result.get() is None
assert result.parent.parent.get() == "task_a"
assert isinstance(result.parent, GroupResult)
assert result.parent.get() == ["task_b", "task_c"]
# DB rows status updated
time.sleep(0.5)
with app.app_context():
tasks = Task.query.filter_by(id=tasks[0].id).all()
workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
assert workflow.status.value == "success"
for task in tasks:
assert task.status.value == "success"
def test_execute_group_error(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_GROUP_ERROR", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 4
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_A"
group_tasks = builder.canvas[2].tasks
assert len(group_tasks) == 2
assert [group_tasks[0].task, group_tasks[1].task] == ["TASK_ERROR", "TASK_C"]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_ERROR", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_error = Task.query.filter_by(key="TASK_ERROR").first()
task_c = Task.query.filter_by(key="TASK_C").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_error.status.value == "error"
assert task_c.status.value == "success"
assert workflow.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_one_task(app, create_builder):
workflow, builder = create_builder("example", "CELERY_ERROR_ONE_TASK", {})
assert workflow["status"] == "pending"
# Tasks executed in Celery
result = builder.run()
with pytest.raises(EncodeError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task = Task.query.order_by(Task.created_at.asc()).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "error"
assert task.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_multiple_tasks(app, create_builder):
workflow, builder = create_builder("example", "CELERY_ERROR_MULTIPLE_TASKS", {})
assert workflow["status"] == "pending"
# Tasks executed in Celery
result = builder.run()
with pytest.raises(EncodeError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_celery_error = Task.query.filter_by(key="TASK_CELERY_ERROR").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_celery_error.status.value == "error"
assert workflow.status.value == "error"
def test_return_values(app, create_builder):
workflow, builder = create_builder("example", "RETURN_VALUES", {})
result = builder.run()
time.sleep(0.5)
with app.app_context():
tasks = {t.key: t.result for t in Task.query.all()}
assert tasks["STR"] == "return_value"
assert tasks["INT"] == 1234
assert tasks["LIST"] == ["jack", "sape", "guido"]
assert tasks["NONE"] is None
assert tasks["DICT"] == {"foo": "bar"}
assert tasks["NESTED"] == {
"jack": 4098,
"sape": 4139,
"guido": 4127,
"nested": {"foo": "bar"},
"none": None,
"list": ["jack", "sape", "guido"],
}
def test_return_exception(app, create_builder):
workflow, builder = create_builder("example", "RETURN_EXCEPTION", {})
result = builder.run()
time.sleep(0.5)
with app.app_context():
tasks = {t.key: t.result for t in Task.query.all()}
assert tasks["STR"] == "return_value"
assert list(tasks["TASK_ERROR"].keys()) == ["exception", "traceback"]
assert tasks["TASK_ERROR"]["exception"] == "division by zero"
assert tasks["TASK_ERROR"]["traceback"].startswith(
"Traceback (most recent call last)"
)
assert "ZeroDivisionError: division by zero" in tasks["TASK_ERROR"]["traceback"]
def test_build_celery_schedule_float_with_payload():
float_schedule = {"payload": {}, "schedule": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
def test_build_celery_schedule_float():
float_schedule = {"schedule": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
@pytest.mark.parametrize(
"test_input, expected",
[
("1 * * * *", crontab(minute="1", hour="*", day_of_week="*", day_of_month="*", month_of_year="*")),
("* 1 * * *", crontab(minute="*", hour="1", day_of_week="*", day_of_month="*", month_of_year="*")),
("* * 1 * *", crontab(minute="*", hour="*", day_of_week="1", day_of_month="*", month_of_year="*")),
("* * * 1 *", crontab(minute="*", hour="*", day_of_week="*", day_of_month="1", month_of_year="*")),
("* * * * 1", crontab(minute="*", hour="*", day_of_week="*", day_of_month="*", month_of_year="1")),
(
"*/10 */11 */12 */13 */14",
crontab(minute="*/10", hour="*/11", day_of_week="*/12", day_of_month="*/13", month_of_year="*/14")
)
]
)
def test_build_celery_schedule_crontab(test_input, expected):
cron_schedule = {"schedule": test_input}
assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_interval():
float_schedule = {"interval": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
@pytest.mark.parametrize(
"test_input, expected",
[
("1 * * * *", crontab(minute="1", hour="*", day_of_month="*", month_of_year="*", day_of_week="*")),
("* 1 * * *", crontab(minute="*", hour="1", day_of_month="*", month_of_year="*", day_of_week="*")),
("* * 1 * *", crontab(minute="*", hour="*", day_of_month="1", month_of_year="*", day_of_week="*")),
("* * * 1 *", crontab(minute="*", hour="*", day_of_month="*", month_of_year="1", day_of_week="*")),
("* * * * 1", crontab(minute="*", hour="*", day_of_month="*", month_of_year="*", day_of_week="1")),
(
"*/10 */11 */12 */13 */14",
crontab(minute="*/10", hour="*/11", day_of_month="*/12", month_of_year="*/13", day_of_week="*/14")
)
]
)
def test_build_celery_crontab(test_input, expected):
cron_schedule = {"crontab": test_input}
assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_invalid_crontab():
    # missing one element in the crontab expression (only four fields instead of five)
periodic_conf = {"crontab": "* * * *"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_crontab", periodic_conf)
def test_build_celery_invalid_schedule():
cron_schedule = {"crontab": "* * * * 12"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_crontab", cron_schedule)
def test_build_celery_invalid_periodic_key():
cron_schedule = {"non_valid_key": "* * * * *"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_key", cron_schedule)
| 36.959079
| 110
| 0.652135
|
import time
import pytest
from celery.result import GroupResult
from celery.schedules import crontab
from kombu.exceptions import EncodeError
from director import build_celery_schedule
from director.exceptions import WorkflowSyntaxError
from director.models.tasks import Task
from director.models.workflows import Workflow
KEYS = ["id", "created", "updated", "task"]
def test_execute_one_task_success(app, create_builder):
workflow, builder = create_builder("example", "WORKFLOW", {})
assert workflow["status"] == "pending"
assert len(builder.canvas) == 3
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_EXAMPLE"
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 1
assert tasks[0].key == "TASK_EXAMPLE"
assert tasks[0].status.value == "pending"
result = builder.run()
assert result.get() is None
assert result.parent.parent.get() is None
assert result.parent.get() == "task_example"
assert result.parent.state == "SUCCESS"
time.sleep(0.5)
with app.app_context():
task = Task.query.filter_by(id=tasks[0].id).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "success"
assert task.status.value == "success"
def test_execute_one_task_error(app, create_builder):
workflow, builder = create_builder("example", "ERROR", {})
assert workflow["status"] == "pending"
assert len(builder.canvas) == 3
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_ERROR"
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 1
assert tasks[0].key == "TASK_ERROR"
assert tasks[0].status.value == "pending"
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
time.sleep(0.5)
with app.app_context():
task = Task.query.filter_by(id=tasks[0].id).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "error"
assert task.status.value == "error"
def test_execute_chain_success(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_CHAIN", {})
assert workflow["status"] == "pending"
assert len(builder.canvas) == 5
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_C"]
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
result = builder.run()
assert result.get() is None
assert result.parent.parent.parent.parent.get() is None
assert result.parent.get() == "task_c"
assert result.parent.state == "SUCCESS"
assert result.parent.parent.get() == "task_b"
assert result.parent.parent.state == "SUCCESS"
assert result.parent.parent.parent.get() == "task_a"
assert result.parent.parent.parent.state == "SUCCESS"
time.sleep(0.5)
with app.app_context():
tasks = Task.query.filter_by(id=tasks[0].id).all()
workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
assert workflow.status.value == "success"
for task in tasks:
assert task.status.value == "success"
def test_execute_chain_error(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_CHAIN_ERROR", {})
assert workflow["status"] == "pending"
assert len(builder.canvas) == 5
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_ERROR"]
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_ERROR"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_b = Task.query.filter_by(key="TASK_B").first()
task_error = Task.query.filter_by(key="TASK_ERROR").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_b.status.value == "success"
assert task_error.status.value == "error"
assert workflow.status.value == "error"
def test_execute_group_success(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_GROUP", {})
assert workflow["status"] == "pending"
assert len(builder.canvas) == 4
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_A"
group_tasks = builder.canvas[2].tasks
assert len(group_tasks) == 2
assert [group_tasks[0].task, group_tasks[1].task] == [
"TASK_B",
"TASK_C",
]
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
result = builder.run()
assert result.get() is None
assert result.parent.parent.get() == "task_a"
assert isinstance(result.parent, GroupResult)
assert result.parent.get() == ["task_b", "task_c"]
time.sleep(0.5)
with app.app_context():
tasks = Task.query.filter_by(id=tasks[0].id).all()
workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
assert workflow.status.value == "success"
for task in tasks:
assert task.status.value == "success"
def test_execute_group_error(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_GROUP_ERROR", {})
assert workflow["status"] == "pending"
assert len(builder.canvas) == 4
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_A"
group_tasks = builder.canvas[2].tasks
assert len(group_tasks) == 2
assert [group_tasks[0].task, group_tasks[1].task] == ["TASK_ERROR", "TASK_C"]
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_ERROR", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_error = Task.query.filter_by(key="TASK_ERROR").first()
task_c = Task.query.filter_by(key="TASK_C").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_error.status.value == "error"
assert task_c.status.value == "success"
assert workflow.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_one_task(app, create_builder):
workflow, builder = create_builder("example", "CELERY_ERROR_ONE_TASK", {})
assert workflow["status"] == "pending"
result = builder.run()
with pytest.raises(EncodeError):
assert result.get()
time.sleep(0.5)
with app.app_context():
task = Task.query.order_by(Task.created_at.asc()).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "error"
assert task.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_multiple_tasks(app, create_builder):
workflow, builder = create_builder("example", "CELERY_ERROR_MULTIPLE_TASKS", {})
assert workflow["status"] == "pending"
result = builder.run()
with pytest.raises(EncodeError):
assert result.get()
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_celery_error = Task.query.filter_by(key="TASK_CELERY_ERROR").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_celery_error.status.value == "error"
assert workflow.status.value == "error"
def test_return_values(app, create_builder):
workflow, builder = create_builder("example", "RETURN_VALUES", {})
result = builder.run()
time.sleep(0.5)
with app.app_context():
tasks = {t.key: t.result for t in Task.query.all()}
assert tasks["STR"] == "return_value"
assert tasks["INT"] == 1234
assert tasks["LIST"] == ["jack", "sape", "guido"]
assert tasks["NONE"] is None
assert tasks["DICT"] == {"foo": "bar"}
assert tasks["NESTED"] == {
"jack": 4098,
"sape": 4139,
"guido": 4127,
"nested": {"foo": "bar"},
"none": None,
"list": ["jack", "sape", "guido"],
}
def test_return_exception(app, create_builder):
workflow, builder = create_builder("example", "RETURN_EXCEPTION", {})
result = builder.run()
time.sleep(0.5)
with app.app_context():
tasks = {t.key: t.result for t in Task.query.all()}
assert tasks["STR"] == "return_value"
assert list(tasks["TASK_ERROR"].keys()) == ["exception", "traceback"]
assert tasks["TASK_ERROR"]["exception"] == "division by zero"
assert tasks["TASK_ERROR"]["traceback"].startswith(
"Traceback (most recent call last)"
)
assert "ZeroDivisionError: division by zero" in tasks["TASK_ERROR"]["traceback"]
def test_build_celery_schedule_float_with_payload():
float_schedule = {"payload": {}, "schedule": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
def test_build_celery_schedule_float():
float_schedule = {"schedule": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
@pytest.mark.parametrize(
"test_input, expected",
[
("1 * * * *", crontab(minute="1", hour="*", day_of_week="*", day_of_month="*", month_of_year="*")),
("* 1 * * *", crontab(minute="*", hour="1", day_of_week="*", day_of_month="*", month_of_year="*")),
("* * 1 * *", crontab(minute="*", hour="*", day_of_week="1", day_of_month="*", month_of_year="*")),
("* * * 1 *", crontab(minute="*", hour="*", day_of_week="*", day_of_month="1", month_of_year="*")),
("* * * * 1", crontab(minute="*", hour="*", day_of_week="*", day_of_month="*", month_of_year="1")),
(
"*/10 */11 */12 */13 */14",
crontab(minute="*/10", hour="*/11", day_of_week="*/12", day_of_month="*/13", month_of_year="*/14")
)
]
)
def test_build_celery_schedule_crontab(test_input, expected):
cron_schedule = {"schedule": test_input}
assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_interval():
float_schedule = {"interval": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
@pytest.mark.parametrize(
"test_input, expected",
[
("1 * * * *", crontab(minute="1", hour="*", day_of_month="*", month_of_year="*", day_of_week="*")),
("* 1 * * *", crontab(minute="*", hour="1", day_of_month="*", month_of_year="*", day_of_week="*")),
("* * 1 * *", crontab(minute="*", hour="*", day_of_month="1", month_of_year="*", day_of_week="*")),
("* * * 1 *", crontab(minute="*", hour="*", day_of_month="*", month_of_year="1", day_of_week="*")),
("* * * * 1", crontab(minute="*", hour="*", day_of_month="*", month_of_year="*", day_of_week="1")),
(
"*/10 */11 */12 */13 */14",
crontab(minute="*/10", hour="*/11", day_of_month="*/12", month_of_year="*/13", day_of_week="*/14")
)
]
)
def test_build_celery_crontab(test_input, expected):
cron_schedule = {"crontab": test_input}
assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_invalid_crontab():
periodic_conf = {"crontab": "* * * *"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_crontab", periodic_conf)
def test_build_celery_invalid_schedule():
cron_schedule = {"crontab": "* * * * 12"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_crontab", cron_schedule)
def test_build_celery_invalid_periodic_key():
cron_schedule = {"non_valid_key": "* * * * *"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_key", cron_schedule)
| true
| true
|
7906d208ebb0c77cfc9666976e8e1b2c9d6a55d1
| 25,194
|
py
|
Python
|
tensorflow/python/keras/engine/training_eager_test.py
|
decibelcooper/tensorflow
|
e85f387c30384664f1006b3189a30702818ff354
|
[
"Apache-2.0"
] | 54
|
2018-05-29T19:52:44.000Z
|
2021-11-30T10:41:12.000Z
|
tensorflow/python/keras/engine/training_eager_test.py
|
decibelcooper/tensorflow
|
e85f387c30384664f1006b3189a30702818ff354
|
[
"Apache-2.0"
] | 20
|
2017-12-06T18:20:54.000Z
|
2021-11-10T09:54:23.000Z
|
tensorflow/python/keras/engine/training_eager_test.py
|
decibelcooper/tensorflow
|
e85f387c30384664f1006b3189a30702818ff354
|
[
"Apache-2.0"
] | 31
|
2018-09-11T02:17:17.000Z
|
2021-12-15T10:33:35.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python import keras
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
class TrainingTest(test.TestCase):
def test_fit_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test fit at different verbosity
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
# Test with validation data
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=2)
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
# Test with validation split
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=0,
validation_split=0.2)
# Test with dictionary inputs
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
epochs=1,
batch_size=5,
verbose=0)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
epochs=1,
batch_size=5,
verbose=1)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
validation_data=({'input_a': input_a_np,
'input_b': input_b_np
},
{
'dense': output_d_np,
'dropout': output_e_np
}),
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np})
# Test with lists for loss, metrics
loss = ['mae', 'mse']
metrics = ['acc', 'mae']
model.compile(optimizer, loss, metrics=metrics)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Test with dictionaries for loss, metrics, loss weights
loss = {'dense': 'mse', 'dropout': 'mae'}
loss_weights = {'dense': 1., 'dropout': 0.5}
metrics = {'dense': 'mse', 'dropout': 'mae'}
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Invalid use cases
with self.assertRaises(AttributeError):
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
validation_data=([input_a_np, input_b_np], 0, 0),
verbose=0)
with self.assertRaises(ValueError):
model.train_on_batch({'input_a': input_a_np},
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch([input_a_np], [output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.train_on_batch(1, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch(input_a_np, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_input = np.random.random((11, 3))
model.train_on_batch([bad_input, input_b_np],
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_target = np.random.random((11, 4))
model.train_on_batch([input_a_np, input_b_np],
[bad_target, output_e_np])
# Build single-input model
x = keras.layers.Input(shape=(3,), name='input_a')
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
# This will work
model.fit([input_a_np], output_d_np, epochs=1)
with self.assertRaises(ValueError):
model.fit([input_a_np, input_a_np], output_d_np, epochs=1)
def test_evaluate_predict_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['acc', 'mae']
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=None)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test evaluate at different verbosity
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=0)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=1)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=2)
self.assertEqual(len(out), 7)
out = model.test_on_batch([input_a_np, input_b_np],
[output_d_np, output_e_np])
self.assertEqual(len(out), 7)
# Test evaluate with dictionary inputs
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
batch_size=5,
verbose=0)
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
batch_size=5,
verbose=1)
# Test predict
out = model.predict([input_a_np, input_b_np], batch_size=5)
self.assertEqual(len(out), 2)
out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
self.assertEqual(len(out), 2)
out = model.predict_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
})
self.assertEqual(len(out), 2)
def test_invalid_loss_or_metrics(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
with self.assertRaises(ValueError):
model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
with self.assertRaises(TypeError):
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=set(0))
with self.assertRaises(ValueError):
model.compile(loss=None,
optimizer='rms')
def test_model_methods_with_eager_tensors_multi_io(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=None)
input_a = keras.backend.zeros(shape=(10, 3))
input_b = keras.backend.zeros(shape=(10, 3))
target_d = keras.backend.zeros(shape=(10, 4))
target_e = keras.backend.zeros(shape=(10, 4))
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0)
# Test: no shuffle.
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
# Test: validation data.
model.fit([input_a, input_b], [target_d, target_e],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_d, target_e]))
model.train_on_batch([input_a, input_b], [target_d, target_e])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_d, target_e],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_d, target_e])
# Test: mix np and tensors.
input_b = np.zeros(shape=(10, 3)).astype('float32')
target_e = np.zeros(shape=(10, 4)).astype('float32')
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0)
model.fit([input_a, input_b], [target_d, target_e],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_d, target_e]))
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
model.train_on_batch([input_a, input_b], [target_d, target_e])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_d, target_e],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_d, target_e])
def test_model_methods_with_eager_tensors_single_io(self):
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = keras.backend.zeros(shape=(10, 3))
targets = keras.backend.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
validation_data=(inputs, targets))
model.evaluate(inputs, targets, batch_size=2, verbose=0)
model.predict(inputs, batch_size=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
class LossWeightingTest(test.TestCase):
def test_class_weights(self):
num_classes = 5
batch_size = 5
weighted_class = 3
train_samples = 300
test_samples = 300
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 4.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 4.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight,
validation_data=(x_train, y_train, sample_weight))
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
ref_score = model.evaluate(x_test, y_test, verbose=0)
score = model.evaluate(
x_test[test_ids, :], y_test[test_ids, :], verbose=0)
self.assertLess(score, ref_score)
def test_sample_weights(self):
num_classes = 5
batch_size = 5
weighted_class = 3
train_samples = 300
test_samples = 300
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(43)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_train = y_train.copy()
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 4.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 4.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
sample_weight=sample_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
sample_weight=sample_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
model.test_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
def test_temporal_sample_weights(self):
num_classes = 5
weighted_class = 3
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
np.random.seed(1337)
(_, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 2.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 2.
with self.assertRaises(ValueError):
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
sample_weight_mode='temporal')
def test_class_weight_invalid_use_case(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
del class_weight[1]
with self.assertRaises(ValueError):
model.fit(x_train, y_train,
epochs=0, verbose=0, class_weight=class_weight)
with self.assertRaises(ValueError):
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
sample_weight_mode=[])
# Build multi-output model
x = keras.Input((3,))
y1 = keras.layers.Dense(4, name='1')(x)
y2 = keras.layers.Dense(4, name='2')(x)
model = keras.models.Model(x, [y1, y2])
model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
x_np = np.random.random((10, 3))
y_np = np.random.random((10, 4))
w_np = np.random.random((10,))
# This will work
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': w_np})
# These will not
with self.assertRaises(ValueError):
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=[w_np])
with self.assertRaises(TypeError):
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=w_np)
with self.assertRaises(ValueError):
bad_w_np = np.random.random((11,))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2, 2))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
class CorrectnessTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
def test_loss_correctness(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
model = keras.Sequential()
model.add(keras.layers.Dense(3,
activation='relu',
input_dim=4,
kernel_initializer='ones'))
model.add(keras.layers.Dense(2,
activation='softmax',
kernel_initializer='ones'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4))
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
history = model.fit(x, y, epochs=1, batch_size=10)
self.assertEqual(
np.around(history.history['loss'][-1], decimals=4), 0.6173)
@tf_test_util.run_in_graph_and_eager_modes()
def test_metrics_correctness(self):
model = keras.Sequential()
model.add(keras.layers.Dense(3,
activation='relu',
input_dim=4,
kernel_initializer='ones'))
model.add(keras.layers.Dense(1,
activation='sigmoid',
kernel_initializer='ones'))
model.compile(loss='mae',
metrics=['acc'],
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4))
y = np.ones((100, 1))
outs = model.evaluate(x, y)
self.assertEqual(outs[1], 1.)
y = np.zeros((100, 1))
outs = model.evaluate(x, y)
self.assertEqual(outs[1], 0.)
@tf_test_util.run_in_graph_and_eager_modes()
def test_loss_correctness_with_iterator(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(2, activation='softmax', kernel_initializer='ones'))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4), dtype=np.float32)
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
history = model.fit(iterator, epochs=1, steps_per_epoch=10)
self.assertEqual(np.around(history.history['loss'][-1], decimals=4), 0.6173)
@tf_test_util.run_in_graph_and_eager_modes()
def test_metrics_correctness_with_iterator(self):
model = keras.Sequential()
model.add(
keras.layers.Dense(
8, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='binary_crossentropy',
metrics=['accuracy'],
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(123)
x = np.random.randint(10, size=(100, 4)).astype(np.float32)
y = np.random.randint(2, size=(100, 1)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
outs = model.evaluate(iterator, steps=10)
self.assertEqual(np.around(outs[1], decimals=1), 0.5)
y = np.zeros((100, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
outs = model.evaluate(iterator, steps=10)
self.assertEqual(outs[1], 0.)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| 34.512329
| 80
| 0.629237
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python import keras
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
class TrainingTest(test.TestCase):
def test_fit_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=2)
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=0,
validation_split=0.2)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
epochs=1,
batch_size=5,
verbose=0)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
epochs=1,
batch_size=5,
verbose=1)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
validation_data=({'input_a': input_a_np,
'input_b': input_b_np
},
{
'dense': output_d_np,
'dropout': output_e_np
}),
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np})
loss = ['mae', 'mse']
metrics = ['acc', 'mae']
model.compile(optimizer, loss, metrics=metrics)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
loss = {'dense': 'mse', 'dropout': 'mae'}
loss_weights = {'dense': 1., 'dropout': 0.5}
metrics = {'dense': 'mse', 'dropout': 'mae'}
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
with self.assertRaises(AttributeError):
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
validation_data=([input_a_np, input_b_np], 0, 0),
verbose=0)
with self.assertRaises(ValueError):
model.train_on_batch({'input_a': input_a_np},
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch([input_a_np], [output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.train_on_batch(1, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch(input_a_np, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_input = np.random.random((11, 3))
model.train_on_batch([bad_input, input_b_np],
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_target = np.random.random((11, 4))
model.train_on_batch([input_a_np, input_b_np],
[bad_target, output_e_np])
x = keras.layers.Input(shape=(3,), name='input_a')
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
model.fit([input_a_np], output_d_np, epochs=1)
with self.assertRaises(ValueError):
model.fit([input_a_np, input_a_np], output_d_np, epochs=1)
def test_evaluate_predict_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['acc', 'mae']
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=None)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=0)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=1)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=2)
self.assertEqual(len(out), 7)
out = model.test_on_batch([input_a_np, input_b_np],
[output_d_np, output_e_np])
self.assertEqual(len(out), 7)
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
batch_size=5,
verbose=0)
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
batch_size=5,
verbose=1)
out = model.predict([input_a_np, input_b_np], batch_size=5)
self.assertEqual(len(out), 2)
out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
self.assertEqual(len(out), 2)
out = model.predict_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
})
self.assertEqual(len(out), 2)
def test_invalid_loss_or_metrics(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
with self.assertRaises(ValueError):
model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
with self.assertRaises(TypeError):
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=set(0))
with self.assertRaises(ValueError):
model.compile(loss=None,
optimizer='rms')
def test_model_methods_with_eager_tensors_multi_io(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=None)
input_a = keras.backend.zeros(shape=(10, 3))
input_b = keras.backend.zeros(shape=(10, 3))
target_d = keras.backend.zeros(shape=(10, 4))
target_e = keras.backend.zeros(shape=(10, 4))
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
model.fit([input_a, input_b], [target_d, target_e],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_d, target_e]))
model.train_on_batch([input_a, input_b], [target_d, target_e])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_d, target_e],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_d, target_e])
input_b = np.zeros(shape=(10, 3)).astype('float32')
target_e = np.zeros(shape=(10, 4)).astype('float32')
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0)
model.fit([input_a, input_b], [target_d, target_e],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_d, target_e]))
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
model.train_on_batch([input_a, input_b], [target_d, target_e])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_d, target_e],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_d, target_e])
def test_model_methods_with_eager_tensors_single_io(self):
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = keras.backend.zeros(shape=(10, 3))
targets = keras.backend.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
validation_data=(inputs, targets))
model.evaluate(inputs, targets, batch_size=2, verbose=0)
model.predict(inputs, batch_size=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
class LossWeightingTest(test.TestCase):
def test_class_weights(self):
num_classes = 5
batch_size = 5
weighted_class = 3
train_samples = 300
test_samples = 300
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 4.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 4.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight,
validation_data=(x_train, y_train, sample_weight))
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
ref_score = model.evaluate(x_test, y_test, verbose=0)
score = model.evaluate(
x_test[test_ids, :], y_test[test_ids, :], verbose=0)
self.assertLess(score, ref_score)
def test_sample_weights(self):
num_classes = 5
batch_size = 5
weighted_class = 3
train_samples = 300
test_samples = 300
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(43)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_train = y_train.copy()
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 4.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 4.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
sample_weight=sample_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
sample_weight=sample_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
model.test_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
def test_temporal_sample_weights(self):
num_classes = 5
weighted_class = 3
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
np.random.seed(1337)
(_, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_train = y_train.copy()
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 2.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 2.
with self.assertRaises(ValueError):
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
sample_weight_mode='temporal')
def test_class_weight_invalid_use_case(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
del class_weight[1]
with self.assertRaises(ValueError):
model.fit(x_train, y_train,
epochs=0, verbose=0, class_weight=class_weight)
with self.assertRaises(ValueError):
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
sample_weight_mode=[])
x = keras.Input((3,))
y1 = keras.layers.Dense(4, name='1')(x)
y2 = keras.layers.Dense(4, name='2')(x)
model = keras.models.Model(x, [y1, y2])
model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
x_np = np.random.random((10, 3))
y_np = np.random.random((10, 4))
w_np = np.random.random((10,))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': w_np})
with self.assertRaises(ValueError):
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=[w_np])
with self.assertRaises(TypeError):
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=w_np)
with self.assertRaises(ValueError):
bad_w_np = np.random.random((11,))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2, 2))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
class CorrectnessTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
def test_loss_correctness(self):
model = keras.Sequential()
model.add(keras.layers.Dense(3,
activation='relu',
input_dim=4,
kernel_initializer='ones'))
model.add(keras.layers.Dense(2,
activation='softmax',
kernel_initializer='ones'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4))
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
history = model.fit(x, y, epochs=1, batch_size=10)
self.assertEqual(
np.around(history.history['loss'][-1], decimals=4), 0.6173)
@tf_test_util.run_in_graph_and_eager_modes()
def test_metrics_correctness(self):
model = keras.Sequential()
model.add(keras.layers.Dense(3,
activation='relu',
input_dim=4,
kernel_initializer='ones'))
model.add(keras.layers.Dense(1,
activation='sigmoid',
kernel_initializer='ones'))
model.compile(loss='mae',
metrics=['acc'],
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4))
y = np.ones((100, 1))
outs = model.evaluate(x, y)
self.assertEqual(outs[1], 1.)
y = np.zeros((100, 1))
outs = model.evaluate(x, y)
self.assertEqual(outs[1], 0.)
@tf_test_util.run_in_graph_and_eager_modes()
def test_loss_correctness_with_iterator(self):
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(2, activation='softmax', kernel_initializer='ones'))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4), dtype=np.float32)
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
history = model.fit(iterator, epochs=1, steps_per_epoch=10)
self.assertEqual(np.around(history.history['loss'][-1], decimals=4), 0.6173)
@tf_test_util.run_in_graph_and_eager_modes()
def test_metrics_correctness_with_iterator(self):
model = keras.Sequential()
model.add(
keras.layers.Dense(
8, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='binary_crossentropy',
metrics=['accuracy'],
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(123)
x = np.random.randint(10, size=(100, 4)).astype(np.float32)
y = np.random.randint(2, size=(100, 1)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
outs = model.evaluate(iterator, steps=10)
self.assertEqual(np.around(outs[1], decimals=1), 0.5)
y = np.zeros((100, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
outs = model.evaluate(iterator, steps=10)
self.assertEqual(outs[1], 0.)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| true
| true
|
7906d211cc2d5c07144850ee4ddbdc281ff422df
| 2,423
|
py
|
Python
|
taurus_pyqtgraph/legendtool.py
|
synchrotron-solaris/taurus_pyqtgraph
|
58563d8628dd3e3912d12c406250b0f5d0b9cf08
|
[
"CC-BY-3.0"
] | null | null | null |
taurus_pyqtgraph/legendtool.py
|
synchrotron-solaris/taurus_pyqtgraph
|
58563d8628dd3e3912d12c406250b0f5d0b9cf08
|
[
"CC-BY-3.0"
] | null | null | null |
taurus_pyqtgraph/legendtool.py
|
synchrotron-solaris/taurus_pyqtgraph
|
58563d8628dd3e3912d12c406250b0f5d0b9cf08
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
__all__ = ["PlotLegendTool"]
from taurus.external.qt import QtGui
from taurus.qt.qtcore.configuration.configuration import BaseConfigurableClass
class PlotLegendTool(QtGui.QWidgetAction, BaseConfigurableClass):
"""
This tool adds a legend to the PlotItem to which it is attached, and it
inserts a checkable menu action for showing/hiding the legend.
Implementation note: this is implemented as a QWidgetAction+QCheckBox
    instead of a checkable QAction to avoid closing the menu when toggling it.
"""
def __init__(self, parent=None):
BaseConfigurableClass.__init__(self)
QtGui.QWidgetAction.__init__(self, parent)
self._cb = QtGui.QCheckBox()
self._cb.setText('Show legend')
self.setDefaultWidget(self._cb)
self.registerConfigProperty(self._cb.isChecked, self._cb.setChecked,
'checked')
# TODO: register config prop for legend position
self._cb.toggled.connect(self._onToggled)
self._legend = None
def attachToPlotItem(self, plotItem):
"""
Use this method to add this tool to a plot
        :param plotItem: (PlotItem)
"""
self._legend = plotItem.addLegend()
self._cb.setChecked(True)
menu = plotItem.getViewBox().menu
menu.addAction(self)
def _onToggled(self, checked):
if checked:
self._legend.show()
else:
self._legend.hide()
| 36.712121
| 78
| 0.647132
| true
| true
|
|
7906d241d502711c52ffe6007f6ff551705e386f
| 1,844
|
py
|
Python
|
tensorflow/contrib/learn/python/learn/ops/dnn_ops.py
|
c0g/tomserflow
|
f7b42f6ba58c3ff20ecd002535d2cca5d93bcf8e
|
[
"Apache-2.0"
] | 2
|
2016-05-25T19:30:35.000Z
|
2016-05-25T20:48:08.000Z
|
tensorflow/contrib/learn/python/learn/ops/dnn_ops.py
|
c0g/tomserflow
|
f7b42f6ba58c3ff20ecd002535d2cca5d93bcf8e
|
[
"Apache-2.0"
] | 1
|
2016-10-19T02:43:04.000Z
|
2016-10-31T14:53:06.000Z
|
tensorflow/contrib/learn/python/learn/ops/dnn_ops.py
|
c0g/tomserflow
|
f7b42f6ba58c3ff20ecd002535d2cca5d93bcf8e
|
[
"Apache-2.0"
] | 8
|
2016-10-23T00:50:02.000Z
|
2019-04-21T11:11:57.000Z
|
"""TensorFlow ops for deep neural networks."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import nn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.contrib.learn.python.learn.ops import dropout_ops
def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
"""Creates fully connected deep neural network subgraph.
Args:
tensor_in: tensor or placeholder for input features.
hidden_units: list of counts of hidden units in each layer.
activation: activation function between layers. Can be None.
dropout: if not None, will add a dropout layer with given probability.
Returns:
    The output tensor of the deep neural network subgraph.
"""
with vs.variable_scope('dnn'):
for i, n_units in enumerate(hidden_units):
with vs.variable_scope('layer%d' % i):
tensor_in = rnn_cell.linear(tensor_in, n_units, True)
if activation is not None:
tensor_in = activation(tensor_in)
if dropout is not None:
tensor_in = dropout_ops.dropout(tensor_in, prob=(1.0 - dropout))
return tensor_in
| 40.086957
| 75
| 0.748373
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import nn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.contrib.learn.python.learn.ops import dropout_ops
def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
with vs.variable_scope('dnn'):
for i, n_units in enumerate(hidden_units):
with vs.variable_scope('layer%d' % i):
tensor_in = rnn_cell.linear(tensor_in, n_units, True)
if activation is not None:
tensor_in = activation(tensor_in)
if dropout is not None:
tensor_in = dropout_ops.dropout(tensor_in, prob=(1.0 - dropout))
return tensor_in
| true
| true
|
7906d2d346f3694f51688dfad1021f614825cc72
| 358
|
py
|
Python
|
term_project/backend/api/admin.py
|
mav10/dataVisualization
|
d3b3d6fc650792a07321f72507b977eaa58c0884
|
[
"MIT"
] | null | null | null |
term_project/backend/api/admin.py
|
mav10/dataVisualization
|
d3b3d6fc650792a07321f72507b977eaa58c0884
|
[
"MIT"
] | null | null | null |
term_project/backend/api/admin.py
|
mav10/dataVisualization
|
d3b3d6fc650792a07321f72507b977eaa58c0884
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Car, CarShop, RepairStation, RepairWork, Reapir, Person, Component
# Register your models here.
admin.site.register(Car)
admin.site.register(CarShop)
admin.site.register(Reapir)
admin.site.register(RepairWork)
admin.site.register(RepairStation)
admin.site.register(Person)
admin.site.register(Component)
| 29.833333
| 86
| 0.818436
|
from django.contrib import admin
from .models import Car, CarShop, RepairStation, RepairWork, Reapir, Person, Component
admin.site.register(Car)
admin.site.register(CarShop)
admin.site.register(Reapir)
admin.site.register(RepairWork)
admin.site.register(RepairStation)
admin.site.register(Person)
admin.site.register(Component)
| true
| true
|
7906d366df1b4f02e9d604481c956ee068de457e
| 193
|
py
|
Python
|
Cython/Fibonacci/functions_folder/setup.py
|
dalexa10/EngineeringDesignOptimization
|
eb5b5e4edd773aef629f59aea8a9771af41bd224
|
[
"MIT"
] | null | null | null |
Cython/Fibonacci/functions_folder/setup.py
|
dalexa10/EngineeringDesignOptimization
|
eb5b5e4edd773aef629f59aea8a9771af41bd224
|
[
"MIT"
] | null | null | null |
Cython/Fibonacci/functions_folder/setup.py
|
dalexa10/EngineeringDesignOptimization
|
eb5b5e4edd773aef629f59aea8a9771af41bd224
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from Cython.Build import cythonize
setup(
name='Fibonacci',
package_dir={'Fibonacci/functions_folder': ''},
ext_modules=cythonize("fib_module.pyx"),
)
| 21.444444
| 51
| 0.735751
|
from setuptools import setup
from Cython.Build import cythonize
setup(
name='Fibonacci',
package_dir={'Fibonacci/functions_folder': ''},
ext_modules=cythonize("fib_module.pyx"),
)
| true
| true
|
7906d434e55925475bee856890e0e7f5ffd82077
| 68,593
|
py
|
Python
|
pynamodb/connection/base.py
|
dwelch91/PynamoDB
|
ae03f5571249206eaf376791e5efb66645e0728b
|
[
"MIT"
] | null | null | null |
pynamodb/connection/base.py
|
dwelch91/PynamoDB
|
ae03f5571249206eaf376791e5efb66645e0728b
|
[
"MIT"
] | null | null | null |
pynamodb/connection/base.py
|
dwelch91/PynamoDB
|
ae03f5571249206eaf376791e5efb66645e0728b
|
[
"MIT"
] | null | null | null |
"""
Lowest level connection
"""
from __future__ import division
import logging
import math
import random
import time
import uuid
import warnings
from base64 import b64decode
from threading import local
import six
from botocore.client import ClientError
from botocore.exceptions import BotoCoreError
from botocore.session import get_session
from botocore.vendored import requests
from botocore.vendored.requests import Request
from six.moves import range
from pynamodb.compat import NullHandler
from pynamodb.connection.util import pythonic
from pynamodb.constants import (
RETURN_CONSUMED_CAPACITY_VALUES, RETURN_ITEM_COLL_METRICS_VALUES, COMPARISON_OPERATOR_VALUES,
RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY, RETURN_VALUES_VALUES, ATTR_UPDATE_ACTIONS,
COMPARISON_OPERATOR, EXCLUSIVE_START_KEY, SCAN_INDEX_FORWARD, SCAN_FILTER_VALUES, ATTR_DEFINITIONS,
BATCH_WRITE_ITEM, CONSISTENT_READ, ATTR_VALUE_LIST, DESCRIBE_TABLE, KEY_CONDITION_EXPRESSION,
BATCH_GET_ITEM, DELETE_REQUEST, SELECT_VALUES, RETURN_VALUES, REQUEST_ITEMS, ATTR_UPDATES,
PROJECTION_EXPRESSION, SERVICE_NAME, DELETE_ITEM, PUT_REQUEST, UPDATE_ITEM, SCAN_FILTER, TABLE_NAME,
INDEX_NAME, KEY_SCHEMA, ATTR_NAME, ATTR_TYPE, TABLE_KEY, EXPECTED, KEY_TYPE, GET_ITEM, UPDATE,
PUT_ITEM, SELECT, ACTION, EXISTS, VALUE, LIMIT, QUERY, SCAN, ITEM, LOCAL_SECONDARY_INDEXES,
KEYS, KEY, EQ, SEGMENT, TOTAL_SEGMENTS, CREATE_TABLE, PROVISIONED_THROUGHPUT, READ_CAPACITY_UNITS,
WRITE_CAPACITY_UNITS, GLOBAL_SECONDARY_INDEXES, PROJECTION, EXCLUSIVE_START_TABLE_NAME, TOTAL,
DELETE_TABLE, UPDATE_TABLE, LIST_TABLES, GLOBAL_SECONDARY_INDEX_UPDATES, ATTRIBUTES,
CONSUMED_CAPACITY, CAPACITY_UNITS, QUERY_FILTER, QUERY_FILTER_VALUES, CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS, NULL, NOT_NULL, SHORT_ATTR_TYPES, DELETE, PUT,
ITEMS, DEFAULT_ENCODING, BINARY_SHORT, BINARY_SET_SHORT, LAST_EVALUATED_KEY, RESPONSES, UNPROCESSED_KEYS,
UNPROCESSED_ITEMS, STREAM_SPECIFICATION, STREAM_VIEW_TYPE, STREAM_ENABLED, UPDATE_EXPRESSION,
EXPRESSION_ATTRIBUTE_NAMES, EXPRESSION_ATTRIBUTE_VALUES, KEY_CONDITION_OPERATOR_MAP,
CONDITION_EXPRESSION, FILTER_EXPRESSION, FILTER_EXPRESSION_OPERATOR_MAP, NOT_CONTAINS, AND)
from pynamodb.exceptions import (
TableError, QueryError, PutError, DeleteError, UpdateError, GetError, ScanError, TableDoesNotExist,
VerboseClientError
)
from pynamodb.expressions.condition import Condition
from pynamodb.expressions.operand import Path
from pynamodb.expressions.projection import create_projection_expression
from pynamodb.expressions.update import Update
from pynamodb.settings import get_settings_value
from pynamodb.signals import pre_dynamodb_send, post_dynamodb_send
from pynamodb.types import HASH, RANGE
BOTOCORE_EXCEPTIONS = (BotoCoreError, ClientError)
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class MetaTable(object):
"""
A pythonic wrapper around table metadata
"""
def __init__(self, data):
self.data = data or {}
self._range_keyname = None
self._hash_keyname = None
def __repr__(self):
if self.data:
return six.u("MetaTable<{0}>".format(self.data.get(TABLE_NAME)))
@property
def range_keyname(self):
"""
Returns the name of this table's range key
"""
if self._range_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == RANGE:
self._range_keyname = attr.get(ATTR_NAME)
return self._range_keyname
@property
def hash_keyname(self):
"""
Returns the name of this table's hash key
"""
if self._hash_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == HASH:
self._hash_keyname = attr.get(ATTR_NAME)
break
return self._hash_keyname
def get_key_names(self, index_name=None):
"""
Returns the names of the primary key attributes and index key attributes (if index_name is specified)
"""
key_names = [self.hash_keyname]
if self.range_keyname:
key_names.append(self.range_keyname)
if index_name is not None:
index_hash_keyname = self.get_index_hash_keyname(index_name)
if index_hash_keyname not in key_names:
key_names.append(index_hash_keyname)
index_range_keyname = self.get_index_range_keyname(index_name)
if index_range_keyname is not None and index_range_keyname not in key_names:
key_names.append(index_range_keyname)
return key_names
def get_index_hash_keyname(self, index_name):
"""
Returns the name of the hash key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == HASH:
return schema_key.get(ATTR_NAME)
def get_index_range_keyname(self, index_name):
"""
        Returns the name of the range key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == RANGE:
return schema_key.get(ATTR_NAME)
return None
def get_item_attribute_map(self, attributes, item_key=ITEM, pythonic_key=True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
if pythonic_key:
item_key = item_key
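            # NOTE: this branch is currently a no-op; item_key is used as given
            # regardless of pythonic_key.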
attr_map = {
item_key: {}
}
for key, value in attributes.items():
# In this case, the user provided a mapping
# {'key': {'S': 'value'}}
if isinstance(value, dict):
attr_map[item_key][key] = value
else:
attr_map[item_key][key] = {
self.get_attribute_type(key): value
}
return attr_map
def get_attribute_type(self, attribute_name, value=None):
"""
Returns the proper attribute type for a given attribute name
"""
for attr in self.data.get(ATTR_DEFINITIONS):
if attr.get(ATTR_NAME) == attribute_name:
return attr.get(ATTR_TYPE)
if value is not None and isinstance(value, dict):
for key in SHORT_ATTR_TYPES:
if key in value:
return key
attr_names = [attr.get(ATTR_NAME) for attr in self.data.get(ATTR_DEFINITIONS)]
raise ValueError("No attribute {0} in {1}".format(attribute_name, attr_names))
def get_identifier_map(self, hash_key, range_key=None, key=KEY):
"""
Builds the identifier map that is common to several operations
"""
kwargs = {
key: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): hash_key
}
}
}
if range_key is not None:
kwargs[key][self.range_keyname] = {
self.get_attribute_type(self.range_keyname): range_key
}
return kwargs
def get_exclusive_start_key_map(self, exclusive_start_key):
"""
Builds the exclusive start key attribute map
"""
if isinstance(exclusive_start_key, dict) and self.hash_keyname in exclusive_start_key:
# This is useful when paginating results, as the LastEvaluatedKey returned is already
# structured properly
return {
EXCLUSIVE_START_KEY: exclusive_start_key
}
else:
return {
EXCLUSIVE_START_KEY: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): exclusive_start_key
}
}
}
class Connection(object):
"""
A higher level abstraction over botocore
"""
def __init__(self, region=None, host=None, session_cls=None,
request_timeout_seconds=None, max_retry_attempts=None, base_backoff_ms=None):
self._tables = {}
self.host = host
self._local = local()
self._requests_session = None
self._client = None
if region:
self.region = region
else:
self.region = get_settings_value('region')
if session_cls:
self.session_cls = session_cls
else:
self.session_cls = get_settings_value('session_cls')
if request_timeout_seconds is not None:
self._request_timeout_seconds = request_timeout_seconds
else:
self._request_timeout_seconds = get_settings_value('request_timeout_seconds')
if max_retry_attempts is not None:
self._max_retry_attempts_exception = max_retry_attempts
else:
self._max_retry_attempts_exception = get_settings_value('max_retry_attempts')
if base_backoff_ms is not None:
self._base_backoff_ms = base_backoff_ms
else:
self._base_backoff_ms = get_settings_value('base_backoff_ms')
def __repr__(self):
return six.u("Connection<{0}>".format(self.client.meta.endpoint_url))
def _log_debug(self, operation, kwargs):
"""
Sends a debug message to the logger
"""
log.debug("Calling %s with arguments %s", operation, kwargs)
def _log_debug_response(self, operation, response):
"""
Sends a debug message to the logger about a response
"""
log.debug("%s response: %s", operation, response)
def _log_error(self, operation, response):
"""
Sends an error message to the logger
"""
log.error("%s failed with status: %s, message: %s",
                  operation, response.status_code, response.content)
def _create_prepared_request(self, request_dict, operation_model):
"""
        Creates a prepared request object from request_dict and operation_model
"""
boto_prepared_request = self.client._endpoint.create_request(request_dict, operation_model)
# The call requests_session.send(final_prepared_request) ignores the headers which are
# part of the request session. In order to include the requests session headers inside
# the request, we create a new request object, and call prepare_request with the newly
# created request object
raw_request_with_params = Request(
boto_prepared_request.method,
boto_prepared_request.url,
data=boto_prepared_request.body,
headers=boto_prepared_request.headers
)
return self.requests_session.prepare_request(raw_request_with_params)
def dispatch(self, operation_name, operation_kwargs):
"""
Dispatches `operation_name` with arguments `operation_kwargs`
Raises TableDoesNotExist if the specified table does not exist
"""
if operation_name not in [DESCRIBE_TABLE, LIST_TABLES, UPDATE_TABLE, DELETE_TABLE, CREATE_TABLE]:
if RETURN_CONSUMED_CAPACITY not in operation_kwargs:
operation_kwargs.update(self.get_consumed_capacity_map(TOTAL))
self._log_debug(operation_name, operation_kwargs)
table_name = operation_kwargs.get(TABLE_NAME)
req_uuid = uuid.uuid4()
self.send_pre_boto_callback(operation_name, req_uuid, table_name)
data = self._make_api_call(operation_name, operation_kwargs)
self.send_post_boto_callback(operation_name, req_uuid, table_name)
if data and CONSUMED_CAPACITY in data:
capacity = data.get(CONSUMED_CAPACITY)
if isinstance(capacity, dict) and CAPACITY_UNITS in capacity:
capacity = capacity.get(CAPACITY_UNITS)
log.debug("%s %s consumed %s units", data.get(TABLE_NAME, ''), operation_name, capacity)
return data
def send_post_boto_callback(self, operation_name, req_uuid, table_name):
try:
post_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("post_boto callback threw an exception.")
def send_pre_boto_callback(self, operation_name, req_uuid, table_name):
try:
pre_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("pre_boto callback threw an exception.")
def _make_api_call(self, operation_name, operation_kwargs):
"""
This private method is here for two reasons:
1. It's faster to avoid using botocore's response parsing
2. It provides a place to monkey patch requests for unit testing
"""
operation_model = self.client._service_model.operation_model(operation_name)
request_dict = self.client._convert_to_request_dict(
operation_kwargs,
operation_model
)
prepared_request = self._create_prepared_request(request_dict, operation_model)
for i in range(0, self._max_retry_attempts_exception + 1):
attempt_number = i + 1
is_last_attempt_for_exceptions = i == self._max_retry_attempts_exception
try:
response = self.requests_session.send(
prepared_request,
timeout=self._request_timeout_seconds,
proxies=self.client._endpoint.proxies,
)
data = response.json()
except (requests.RequestException, ValueError) as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
else:
# No backoff for fast-fail exceptions that likely failed at the frontend
log.debug(
'Retry needed for (%s) after attempt %s, retryable %s caught: %s',
operation_name,
attempt_number,
e.__class__.__name__,
e
)
continue
if response.status_code >= 300:
# Extract error code from __type
code = data.get('__type', '')
if '#' in code:
code = code.rsplit('#', 1)[1]
botocore_expected_format = {'Error': {'Message': data.get('message', ''), 'Code': code}}
verbose_properties = {
'request_id': response.headers.get('x-amzn-RequestId')
}
if 'RequestItems' in operation_kwargs:
# Batch operations can hit multiple tables, report them comma separated
verbose_properties['table_name'] = ','.join(operation_kwargs['RequestItems'])
else:
verbose_properties['table_name'] = operation_kwargs.get('TableName')
try:
raise VerboseClientError(botocore_expected_format, operation_name, verbose_properties)
except VerboseClientError as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
elif response.status_code < 500 and code != 'ProvisionedThroughputExceededException':
# We don't retry on a ConditionalCheckFailedException or other 4xx (except for
# throughput related errors) because we assume they will fail in perpetuity.
# Retrying when there is already contention could cause other problems
# in part due to unnecessary consumption of throughput.
raise
else:
# We use fully-jittered exponentially-backed-off retries:
# https://www.awsarchitectureblog.com/2015/03/backoff.html
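                        # e.g. with base_backoff_ms=25 the first retry sleeps 0-25 ms,
                        # the second 0-50 ms, the third 0-100 ms, and so on.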
sleep_time_ms = random.randint(0, self._base_backoff_ms * (2 ** i))
log.debug(
                            'Retry with backoff needed for (%s) after attempt %s, '
'sleeping for %s milliseconds, retryable %s caught: %s',
operation_name,
attempt_number,
sleep_time_ms,
e.__class__.__name__,
e
)
time.sleep(sleep_time_ms / 1000.0)
continue
return self._handle_binary_attributes(data)
@staticmethod
def _handle_binary_attributes(data):
""" Simulate botocore's binary attribute handling """
if ITEM in data:
for attr in six.itervalues(data[ITEM]):
_convert_binary(attr)
if ITEMS in data:
for item in data[ITEMS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if RESPONSES in data:
for item_list in six.itervalues(data[RESPONSES]):
for item in item_list:
for attr in six.itervalues(item):
_convert_binary(attr)
if LAST_EVALUATED_KEY in data:
for attr in six.itervalues(data[LAST_EVALUATED_KEY]):
_convert_binary(attr)
if UNPROCESSED_KEYS in data:
for table_data in six.itervalues(data[UNPROCESSED_KEYS]):
for item in table_data[KEYS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if UNPROCESSED_ITEMS in data:
for table_unprocessed_requests in six.itervalues(data[UNPROCESSED_ITEMS]):
for request in table_unprocessed_requests:
for item_mapping in six.itervalues(request):
for item in six.itervalues(item_mapping):
for attr in six.itervalues(item):
_convert_binary(attr)
if ATTRIBUTES in data:
for attr in six.itervalues(data[ATTRIBUTES]):
_convert_binary(attr)
return data
@property
def session(self):
"""
Returns a valid botocore session
"""
# botocore client creation is not thread safe as of v1.2.5+ (see issue #153)
if getattr(self._local, 'session', None) is None:
self._local.session = get_session()
return self._local.session
@property
def requests_session(self):
"""
Return a requests session to execute prepared requests using the same pool
"""
if self._requests_session is None:
self._requests_session = self.session_cls()
return self._requests_session
@property
def client(self):
"""
Returns a botocore dynamodb client
"""
# botocore has a known issue where it will cache empty credentials
# https://github.com/boto/botocore/blob/4d55c9b4142/botocore/credentials.py#L1016-L1021
# if the client does not have credentials, we create a new client
# otherwise the client is permanently poisoned in the case of metadata service flakiness when using IAM roles
if not self._client or (self._client._request_signer and not self._client._request_signer._credentials):
self._client = self.session.create_client(SERVICE_NAME, self.region, endpoint_url=self.host)
return self._client
def get_meta_table(self, table_name, refresh=False):
"""
Returns a MetaTable
"""
if table_name not in self._tables or refresh:
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DESCRIBE_TABLE, operation_kwargs)
self._tables[table_name] = MetaTable(data.get(TABLE_KEY))
except BotoCoreError as e:
raise TableError("Unable to describe table: {0}".format(e), e)
except ClientError as e:
if 'ResourceNotFound' in e.response['Error']['Code']:
raise TableDoesNotExist(e.response['Error']['Message'])
else:
raise
return self._tables[table_name]
def create_table(self,
table_name,
attribute_definitions=None,
key_schema=None,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_indexes=None,
local_secondary_indexes=None,
stream_specification=None):
"""
Performs the CreateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name,
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
}
attrs_list = []
if attribute_definitions is None:
raise ValueError("attribute_definitions argument is required")
for attr in attribute_definitions:
attrs_list.append({
ATTR_NAME: attr.get(pythonic(ATTR_NAME)),
ATTR_TYPE: attr.get(pythonic(ATTR_TYPE))
})
operation_kwargs[ATTR_DEFINITIONS] = attrs_list
if global_secondary_indexes:
global_secondary_indexes_list = []
for index in global_secondary_indexes:
global_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
PROVISIONED_THROUGHPUT: index.get(pythonic(PROVISIONED_THROUGHPUT))
})
operation_kwargs[GLOBAL_SECONDARY_INDEXES] = global_secondary_indexes_list
if key_schema is None:
raise ValueError("key_schema is required")
key_schema_list = []
for item in key_schema:
key_schema_list.append({
ATTR_NAME: item.get(pythonic(ATTR_NAME)),
KEY_TYPE: str(item.get(pythonic(KEY_TYPE))).upper()
})
operation_kwargs[KEY_SCHEMA] = sorted(key_schema_list, key=lambda x: x.get(KEY_TYPE))
local_secondary_indexes_list = []
if local_secondary_indexes:
for index in local_secondary_indexes:
local_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
})
operation_kwargs[LOCAL_SECONDARY_INDEXES] = local_secondary_indexes_list
if stream_specification:
operation_kwargs[STREAM_SPECIFICATION] = {
STREAM_ENABLED: stream_specification[pythonic(STREAM_ENABLED)],
STREAM_VIEW_TYPE: stream_specification[pythonic(STREAM_VIEW_TYPE)]
}
try:
data = self.dispatch(CREATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to create table: {0}".format(e), e)
return data
def delete_table(self, table_name):
"""
Performs the DeleteTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DELETE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to delete table: {0}".format(e), e)
return data
def update_table(self,
table_name,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_index_updates=None):
"""
Performs the UpdateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
        if (read_capacity_units and not write_capacity_units) or (write_capacity_units and not read_capacity_units):
raise ValueError("read_capacity_units and write_capacity_units are required together")
if read_capacity_units and write_capacity_units:
operation_kwargs[PROVISIONED_THROUGHPUT] = {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
if global_secondary_index_updates:
global_secondary_indexes_list = []
for index in global_secondary_index_updates:
global_secondary_indexes_list.append({
UPDATE: {
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: index.get(pythonic(READ_CAPACITY_UNITS)),
WRITE_CAPACITY_UNITS: index.get(pythonic(WRITE_CAPACITY_UNITS))
}
}
})
operation_kwargs[GLOBAL_SECONDARY_INDEX_UPDATES] = global_secondary_indexes_list
try:
return self.dispatch(UPDATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to update table: {0}".format(e), e)
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Performs the ListTables operation
"""
operation_kwargs = {}
if exclusive_start_table_name:
operation_kwargs.update({
EXCLUSIVE_START_TABLE_NAME: exclusive_start_table_name
})
if limit is not None:
operation_kwargs.update({
LIMIT: limit
})
try:
return self.dispatch(LIST_TABLES, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Unable to list tables: {0}".format(e), e)
def describe_table(self, table_name):
"""
Performs the DescribeTable operation
"""
try:
tbl = self.get_meta_table(table_name, refresh=True)
if tbl:
return tbl.data
except ValueError:
pass
raise TableDoesNotExist(table_name)
def get_conditional_operator(self, operator):
"""
Returns a dictionary containing the correct conditional operator,
validating it first.
"""
operator = operator.upper()
if operator not in CONDITIONAL_OPERATORS:
raise ValueError(
"The {0} must be one of {1}".format(
CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS
)
)
return {
CONDITIONAL_OPERATOR: operator
}
def get_item_attribute_map(self, table_name, attributes, item_key=ITEM, pythonic_key=True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_item_attribute_map(
attributes,
item_key=item_key,
pythonic_key=pythonic_key)
def get_expected_map(self, table_name, expected):
"""
Builds the expected map that is common to several operations
"""
kwargs = {EXPECTED: {}}
for key, condition in expected.items():
if EXISTS in condition:
kwargs[EXPECTED][key] = {
EXISTS: condition.get(EXISTS)
}
elif VALUE in condition:
kwargs[EXPECTED][key] = {
VALUE: {
self.get_attribute_type(table_name, key): condition.get(VALUE)
}
}
elif COMPARISON_OPERATOR in condition:
kwargs[EXPECTED][key] = {
COMPARISON_OPERATOR: condition.get(COMPARISON_OPERATOR),
}
values = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_type = self.get_attribute_type(table_name, key, value)
values.append({attr_type: self.parse_attribute(value)})
if condition.get(COMPARISON_OPERATOR) not in [NULL, NOT_NULL]:
kwargs[EXPECTED][key][ATTR_VALUE_LIST] = values
return kwargs
def parse_attribute(self, attribute, return_type=False):
"""
Returns the attribute value, where the attribute can be
a raw attribute value, or a dictionary containing the type:
{'S': 'String value'}
"""
if isinstance(attribute, dict):
for key in SHORT_ATTR_TYPES:
if key in attribute:
if return_type:
return key, attribute.get(key)
return attribute.get(key)
raise ValueError("Invalid attribute supplied: {0}".format(attribute))
else:
if return_type:
return None, attribute
return attribute
def get_attribute_type(self, table_name, attribute_name, value=None):
"""
Returns the proper attribute type for a given attribute name
        :param value: The attribute value can be supplied in case the type is already included
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_attribute_type(attribute_name, value=value)
def get_identifier_map(self, table_name, hash_key, range_key=None, key=KEY):
"""
Builds the identifier map that is common to several operations
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_identifier_map(hash_key, range_key=range_key, key=key)
def get_query_filter_map(self, table_name, query_filters):
"""
Builds the QueryFilter object needed for the Query operation
"""
kwargs = {
QUERY_FILTER: {}
}
for key, condition in query_filters.items():
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
attr_value_list = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_value_list.append({
self.get_attribute_type(table_name, key, value): self.parse_attribute(value)
})
kwargs[QUERY_FILTER][key] = {
COMPARISON_OPERATOR: operator
}
if len(attr_value_list):
kwargs[QUERY_FILTER][key][ATTR_VALUE_LIST] = attr_value_list
return kwargs
def get_consumed_capacity_map(self, return_consumed_capacity):
"""
Builds the consumed capacity map that is common to several operations
"""
if return_consumed_capacity.upper() not in RETURN_CONSUMED_CAPACITY_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY_VALUES))
return {
RETURN_CONSUMED_CAPACITY: str(return_consumed_capacity).upper()
}
def get_return_values_map(self, return_values):
"""
Builds the return values map that is common to several operations
"""
if return_values.upper() not in RETURN_VALUES_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_VALUES, RETURN_VALUES_VALUES))
return {
RETURN_VALUES: str(return_values).upper()
}
def get_item_collection_map(self, return_item_collection_metrics):
"""
Builds the item collection map
"""
if return_item_collection_metrics.upper() not in RETURN_ITEM_COLL_METRICS_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_ITEM_COLL_METRICS_VALUES))
return {
RETURN_ITEM_COLL_METRICS: str(return_item_collection_metrics).upper()
}
def get_exclusive_start_key_map(self, table_name, exclusive_start_key):
"""
Builds the exclusive start key attribute map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_exclusive_start_key_map(exclusive_start_key)
def delete_item(self,
table_name,
hash_key,
range_key=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the DeleteItem operation and returns the result
"""
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(DELETE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise DeleteError("Failed to delete item: {0}".format(e), e)
def update_item(self,
table_name,
hash_key,
range_key=None,
actions=None,
attribute_updates=None,
condition=None,
expected=None,
return_consumed_capacity=None,
conditional_operator=None,
return_item_collection_metrics=None,
return_values=None):
"""
Performs the UpdateItem operation
"""
self._check_actions(actions, attribute_updates)
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if not actions and not attribute_updates:
raise ValueError("{0} cannot be empty".format(ATTR_UPDATES))
actions = actions or []
attribute_updates = attribute_updates or {}
update_expression = Update(*actions)
# We sort the keys here for determinism. This is mostly done to simplify testing.
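        # Illustrative translation: {'views': {'Action': 'PUT', 'Value': 10}} becomes a SET action,
        # while {'views': {'Action': 'DELETE'}} with no value becomes a REMOVE action.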
for key in sorted(attribute_updates.keys()):
path = Path([key])
update = attribute_updates[key]
action = update.get(ACTION)
if action not in ATTR_UPDATE_ACTIONS:
raise ValueError("{0} must be one of {1}".format(ACTION, ATTR_UPDATE_ACTIONS))
value = update.get(VALUE)
attr_type, value = self.parse_attribute(value, return_type=True)
if attr_type is None and action != DELETE:
attr_type = self.get_attribute_type(table_name, key, value)
value = {attr_type: value}
if action == DELETE:
action = path.remove() if attr_type is None else path.delete(value)
elif action == PUT:
action = path.set(value)
else:
action = path.add(value)
update_expression.add_action(action)
operation_kwargs[UPDATE_EXPRESSION] = update_expression.serialize(name_placeholders, expression_attribute_values)
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(UPDATE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise UpdateError("Failed to update item: {0}".format(e), e)
def put_item(self,
table_name,
hash_key,
range_key=None,
attributes=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the PutItem operation and returns the result
"""
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key, key=ITEM))
name_placeholders = {}
expression_attribute_values = {}
if attributes:
attrs = self.get_item_attribute_map(table_name, attributes)
operation_kwargs[ITEM].update(attrs[ITEM])
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(PUT_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to put item: {0}".format(e), e)
def batch_write_item(self,
table_name,
put_items=None,
delete_items=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Performs the batch_write_item operation
"""
if put_items is None and delete_items is None:
raise ValueError("Either put_items or delete_items must be specified")
operation_kwargs = {
REQUEST_ITEMS: {
table_name: []
}
}
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
put_items_list = []
if put_items:
for item in put_items:
put_items_list.append({
PUT_REQUEST: self.get_item_attribute_map(table_name, item, pythonic_key=False)
})
delete_items_list = []
if delete_items:
for item in delete_items:
delete_items_list.append({
DELETE_REQUEST: self.get_item_attribute_map(table_name, item, item_key=KEY, pythonic_key=False)
})
operation_kwargs[REQUEST_ITEMS][table_name] = delete_items_list + put_items_list
try:
return self.dispatch(BATCH_WRITE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to batch write items: {0}".format(e), e)
def batch_get_item(self,
table_name,
keys,
consistent_read=None,
return_consumed_capacity=None,
attributes_to_get=None):
"""
Performs the batch get item operation
"""
operation_kwargs = {
REQUEST_ITEMS: {
table_name: {}
}
}
args_map = {}
name_placeholders = {}
if consistent_read:
args_map[CONSISTENT_READ] = consistent_read
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
args_map[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
args_map[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[REQUEST_ITEMS][table_name].update(args_map)
keys_map = {KEYS: []}
for key in keys:
keys_map[KEYS].append(
self.get_item_attribute_map(table_name, key)[ITEM]
)
operation_kwargs[REQUEST_ITEMS][table_name].update(keys_map)
try:
return self.dispatch(BATCH_GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to batch get items: {0}".format(e), e)
def get_item(self,
table_name,
hash_key,
range_key=None,
consistent_read=False,
attributes_to_get=None):
"""
Performs the GetItem operation and returns the result
"""
operation_kwargs = {}
name_placeholders = {}
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[CONSISTENT_READ] = consistent_read
operation_kwargs[TABLE_NAME] = table_name
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
try:
return self.dispatch(GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to get item: {0}".format(e), e)
def rate_limited_scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
page_size=None,
limit=None,
conditional_operator=None,
scan_filter=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
timeout_seconds=None,
read_capacity_to_consume_per_second=10,
allow_rate_limited_scan_without_consumed_capacity=None,
max_sleep_between_retry=10,
max_consecutive_exceptions=10,
consistent_read=None,
index_name=None):
"""
        Performs a rate limited scan on the table. This method uses the Scan API to fetch items from
        DynamoDB and throttles itself based on the 'ConsumedCapacity' value returned with each response.
        'ProvisionedThroughputExceededException' errors are also handled and retried.
:param table_name: Name of the table to perform scan on.
:param filter_condition: Condition used to restrict the scan results
:param attributes_to_get: A list of attributes to return.
:param page_size: Page size of the scan to DynamoDB
:param limit: Used to limit the number of results returned
        :param conditional_operator: The legacy conditional operator, used together with scan_filter
:param scan_filter: A map indicating the condition that evaluates the scan results
:param exclusive_start_key: If set, provides the starting point for scan.
:param segment: If set, then scans the segment
:param total_segments: If set, then specifies total segments
:param timeout_seconds: Timeout value for the rate_limited_scan method, to prevent it from running
infinitely
:param read_capacity_to_consume_per_second: Amount of read capacity to consume
every second
:param allow_rate_limited_scan_without_consumed_capacity: If set, proceeds without rate limiting if
the server does not support returning consumed capacity in responses.
:param max_sleep_between_retry: Max value for sleep in seconds in between scans during
throttling/rate limit scenarios
        :param max_consecutive_exceptions: Maximum number of consecutive ProvisionedThroughputExceededException
            errors allowed before the scan gives up
:param consistent_read: enable consistent read
:param index_name: an index to perform the scan on
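
        Example (an illustrative usage sketch; the table name, limits and process() callback below
        are hypothetical, not part of the API)::

            for item in connection.rate_limited_scan(
                    'UserTable',
                    read_capacity_to_consume_per_second=5,
                    timeout_seconds=60):
                process(item)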
"""
read_capacity_to_consume_per_ms = float(read_capacity_to_consume_per_second) / 1000
if allow_rate_limited_scan_without_consumed_capacity is None:
allow_rate_limited_scan_without_consumed_capacity = get_settings_value(
'allow_rate_limited_scan_without_consumed_capacity'
)
total_consumed_read_capacity = 0.0
last_evaluated_key = exclusive_start_key
rate_available = True
latest_scan_consumed_capacity = 0
consecutive_provision_throughput_exceeded_ex = 0
start_time = time.time()
if page_size is None:
if limit and read_capacity_to_consume_per_second > limit:
page_size = limit
else:
page_size = read_capacity_to_consume_per_second
while True:
if rate_available:
try:
data = self.scan(
table_name,
filter_condition=filter_condition,
attributes_to_get=attributes_to_get,
exclusive_start_key=last_evaluated_key,
limit=page_size,
conditional_operator=conditional_operator,
return_consumed_capacity=TOTAL,
scan_filter=scan_filter,
segment=segment,
total_segments=total_segments,
consistent_read=consistent_read,
index_name=index_name
)
for item in data.get(ITEMS):
yield item
if limit is not None:
limit -= 1
if not limit:
return
if CONSUMED_CAPACITY in data:
latest_scan_consumed_capacity = data.get(CONSUMED_CAPACITY).get(CAPACITY_UNITS)
else:
if allow_rate_limited_scan_without_consumed_capacity:
latest_scan_consumed_capacity = 0
else:
                            raise ScanError('Rate limited scan not possible because the server did not send back '
                                            'consumed capacity information. If you wish scans to complete anyway '
                                            'without functioning rate limiting, set '
                                            'allow_rate_limited_scan_without_consumed_capacity to True in settings.')
last_evaluated_key = data.get(LAST_EVALUATED_KEY, None)
consecutive_provision_throughput_exceeded_ex = 0
except ScanError as e:
# Only retry if provision throughput is exceeded.
if isinstance(e.cause, ClientError):
code = e.cause.response['Error'].get('Code')
if code == "ProvisionedThroughputExceededException":
consecutive_provision_throughput_exceeded_ex += 1
if consecutive_provision_throughput_exceeded_ex > max_consecutive_exceptions:
# Max threshold reached
raise
else:
# Different exception, other than ProvisionedThroughputExceededException
raise
else:
# Not a Client error
raise
# No throttling, and no more scans needed. Just return
if not last_evaluated_key and consecutive_provision_throughput_exceeded_ex == 0:
return
current_time = time.time()
# elapsed_time_ms indicates the time taken in ms from the start of the
# throttled_scan call.
elapsed_time_ms = max(1, round((current_time - start_time) * 1000))
if consecutive_provision_throughput_exceeded_ex == 0:
total_consumed_read_capacity += latest_scan_consumed_capacity
consumed_rate = total_consumed_read_capacity / elapsed_time_ms
rate_available = (read_capacity_to_consume_per_ms - consumed_rate) >= 0
# consecutive_provision_throughput_exceeded_ex > 0 indicates ProvisionedThroughputExceededException occurred.
# ProvisionedThroughputExceededException can occur if:
# - The rate to consume is passed incorrectly.
# - External factors, even if the current scan is within limits.
if not rate_available or (consecutive_provision_throughput_exceeded_ex > 0):
# Minimum value is 1 second.
elapsed_time_s = math.ceil(elapsed_time_ms / 1000)
# Sleep proportional to the ratio of --consumed capacity-- to --capacity to consume--
                time_to_sleep = max(1, round((total_consumed_read_capacity / elapsed_time_s)
                                             / read_capacity_to_consume_per_second))
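                # e.g. having consumed 40 read units over 2 seconds against a 10 unit/second budget
                # sleeps for max(1, round((40 / 2) / 10)) = 2 seconds, capped by max_sleep_between_retry.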
# At any moment if the timeout_seconds hits, then return
if timeout_seconds and (elapsed_time_s + time_to_sleep) > timeout_seconds:
raise ScanError("Input timeout value {0} has expired".format(timeout_seconds))
time.sleep(min(math.ceil(time_to_sleep), max_sleep_between_retry))
# Reset the latest_scan_consumed_capacity, as no scan operation was performed.
latest_scan_consumed_capacity = 0
def scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
limit=None,
conditional_operator=None,
scan_filter=None,
return_consumed_capacity=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
consistent_read=None,
index_name=None):
"""
Performs the scan operation
"""
self._check_condition('filter_condition', filter_condition, scan_filter, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if segment is not None:
operation_kwargs[SEGMENT] = segment
if total_segments:
operation_kwargs[TOTAL_SEGMENTS] = total_segments
if scan_filter:
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
filter_expression = self._get_filter_expression(
table_name, scan_filter, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = consistent_read
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(SCAN, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise ScanError("Failed to scan table: {0}".format(e), e)
def query(self,
table_name,
hash_key,
range_key_condition=None,
filter_condition=None,
attributes_to_get=None,
consistent_read=False,
exclusive_start_key=None,
index_name=None,
key_conditions=None,
query_filters=None,
conditional_operator=None,
limit=None,
return_consumed_capacity=None,
scan_index_forward=None,
select=None):
"""
Performs the Query operation and returns the result
"""
self._check_condition('range_key_condition', range_key_condition, key_conditions, conditional_operator)
self._check_condition('filter_condition', filter_condition, query_filters, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table: {0}".format(table_name))
if index_name:
hash_keyname = tbl.get_index_hash_keyname(index_name)
if not hash_keyname:
raise ValueError("No hash key attribute for index: {0}".format(index_name))
range_keyname = tbl.get_index_range_keyname(index_name)
else:
hash_keyname = tbl.hash_keyname
range_keyname = tbl.range_keyname
key_condition = self._get_condition(table_name, hash_keyname, '__eq__', hash_key)
if range_key_condition is not None:
if range_key_condition.is_valid_range_key_condition(range_keyname):
key_condition = key_condition & range_key_condition
elif filter_condition is None:
# Try to gracefully handle the case where a user passed in a filter as a range key condition
(filter_condition, range_key_condition) = (range_key_condition, None)
else:
raise ValueError("{0} is not a valid range key condition".format(range_key_condition))
if key_conditions is None or len(key_conditions) == 0:
pass # No comparisons on sort key
elif len(key_conditions) > 1:
raise ValueError("Multiple attributes are not supported in key_conditions: {0}".format(key_conditions))
else:
(key, condition), = key_conditions.items()
operator = condition.get(COMPARISON_OPERATOR)
if operator not in COMPARISON_OPERATOR_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, COMPARISON_OPERATOR_VALUES))
operator = KEY_CONDITION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST)
sort_key_expression = self._get_condition(table_name, key, operator, *values)
key_condition = key_condition & sort_key_expression
operation_kwargs[KEY_CONDITION_EXPRESSION] = key_condition.serialize(
name_placeholders, expression_attribute_values)
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
# FilterExpression does not allow key attributes. Check for hash and range key name placeholders
hash_key_placeholder = name_placeholders.get(hash_keyname)
range_key_placeholder = range_keyname and name_placeholders.get(range_keyname)
if (
hash_key_placeholder in filter_expression or
(range_key_placeholder and range_key_placeholder in filter_expression)
):
raise ValueError("'filter_condition' cannot contain key attributes")
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = True
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
# We read the conditional operator even without a query filter passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if query_filters:
filter_expression = self._get_filter_expression(
table_name, query_filters, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if select:
if select.upper() not in SELECT_VALUES:
raise ValueError("{0} must be one of {1}".format(SELECT, SELECT_VALUES))
operation_kwargs[SELECT] = str(select).upper()
if scan_index_forward is not None:
operation_kwargs[SCAN_INDEX_FORWARD] = scan_index_forward
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(QUERY, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise QueryError("Failed to query items: {0}".format(e), e)
def _get_condition_expression(self, table_name, expected, conditional_operator,
name_placeholders, expression_attribute_values):
"""
Builds the ConditionExpression needed for DeleteItem, PutItem, and UpdateItem operations
"""
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(expected.keys()):
condition = expected[key]
if EXISTS in condition:
operator = NOT_NULL if condition.get(EXISTS, True) else NULL
values = []
elif VALUE in condition:
operator = EQ
values = [condition.get(VALUE)]
else:
operator = condition.get(COMPARISON_OPERATOR)
values = condition.get(ATTR_VALUE_LIST, [])
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
def _get_filter_expression(self, table_name, filters, conditional_operator,
name_placeholders, expression_attribute_values):
"""
Builds the FilterExpression needed for Query and Scan operations
"""
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(filters.keys()):
condition = filters[key]
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST, [])
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
def _get_condition(self, table_name, attribute_name, operator, *values):
values = [
{self.get_attribute_type(table_name, attribute_name, value): self.parse_attribute(value)}
for value in values
]
return getattr(Path([attribute_name]), operator)(*values)
def _check_actions(self, actions, attribute_updates):
if actions is not None:
if attribute_updates is not None:
raise ValueError("Legacy attribute updates cannot be used with update actions")
else:
if attribute_updates is not None:
warnings.warn("Legacy attribute updates are deprecated in favor of update actions")
def _check_condition(self, name, condition, expected_or_filter, conditional_operator):
if condition is not None:
if not isinstance(condition, Condition):
raise ValueError("'{0}' must be an instance of Condition".format(name))
if expected_or_filter or conditional_operator is not None:
raise ValueError("Legacy conditional parameters cannot be used with condition expressions")
else:
if expected_or_filter or conditional_operator is not None:
warnings.warn("Legacy conditional parameters are deprecated in favor of condition expressions")
@staticmethod
def _reverse_dict(d):
return dict((v, k) for k, v in six.iteritems(d))
def _convert_binary(attr):
if BINARY_SHORT in attr:
attr[BINARY_SHORT] = b64decode(attr[BINARY_SHORT].encode(DEFAULT_ENCODING))
elif BINARY_SET_SHORT in attr:
value = attr[BINARY_SET_SHORT]
if value and len(value):
attr[BINARY_SET_SHORT] = set(b64decode(v.encode(DEFAULT_ENCODING)) for v in value)
| 45.698201
| 121
| 0.627469
|
from __future__ import division
import logging
import math
import random
import time
import uuid
import warnings
from base64 import b64decode
from threading import local
import six
from botocore.client import ClientError
from botocore.exceptions import BotoCoreError
from botocore.session import get_session
from botocore.vendored import requests
from botocore.vendored.requests import Request
from six.moves import range
from pynamodb.compat import NullHandler
from pynamodb.connection.util import pythonic
from pynamodb.constants import (
RETURN_CONSUMED_CAPACITY_VALUES, RETURN_ITEM_COLL_METRICS_VALUES, COMPARISON_OPERATOR_VALUES,
RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY, RETURN_VALUES_VALUES, ATTR_UPDATE_ACTIONS,
COMPARISON_OPERATOR, EXCLUSIVE_START_KEY, SCAN_INDEX_FORWARD, SCAN_FILTER_VALUES, ATTR_DEFINITIONS,
BATCH_WRITE_ITEM, CONSISTENT_READ, ATTR_VALUE_LIST, DESCRIBE_TABLE, KEY_CONDITION_EXPRESSION,
BATCH_GET_ITEM, DELETE_REQUEST, SELECT_VALUES, RETURN_VALUES, REQUEST_ITEMS, ATTR_UPDATES,
PROJECTION_EXPRESSION, SERVICE_NAME, DELETE_ITEM, PUT_REQUEST, UPDATE_ITEM, SCAN_FILTER, TABLE_NAME,
INDEX_NAME, KEY_SCHEMA, ATTR_NAME, ATTR_TYPE, TABLE_KEY, EXPECTED, KEY_TYPE, GET_ITEM, UPDATE,
PUT_ITEM, SELECT, ACTION, EXISTS, VALUE, LIMIT, QUERY, SCAN, ITEM, LOCAL_SECONDARY_INDEXES,
KEYS, KEY, EQ, SEGMENT, TOTAL_SEGMENTS, CREATE_TABLE, PROVISIONED_THROUGHPUT, READ_CAPACITY_UNITS,
WRITE_CAPACITY_UNITS, GLOBAL_SECONDARY_INDEXES, PROJECTION, EXCLUSIVE_START_TABLE_NAME, TOTAL,
DELETE_TABLE, UPDATE_TABLE, LIST_TABLES, GLOBAL_SECONDARY_INDEX_UPDATES, ATTRIBUTES,
CONSUMED_CAPACITY, CAPACITY_UNITS, QUERY_FILTER, QUERY_FILTER_VALUES, CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS, NULL, NOT_NULL, SHORT_ATTR_TYPES, DELETE, PUT,
ITEMS, DEFAULT_ENCODING, BINARY_SHORT, BINARY_SET_SHORT, LAST_EVALUATED_KEY, RESPONSES, UNPROCESSED_KEYS,
UNPROCESSED_ITEMS, STREAM_SPECIFICATION, STREAM_VIEW_TYPE, STREAM_ENABLED, UPDATE_EXPRESSION,
EXPRESSION_ATTRIBUTE_NAMES, EXPRESSION_ATTRIBUTE_VALUES, KEY_CONDITION_OPERATOR_MAP,
CONDITION_EXPRESSION, FILTER_EXPRESSION, FILTER_EXPRESSION_OPERATOR_MAP, NOT_CONTAINS, AND)
from pynamodb.exceptions import (
TableError, QueryError, PutError, DeleteError, UpdateError, GetError, ScanError, TableDoesNotExist,
VerboseClientError
)
from pynamodb.expressions.condition import Condition
from pynamodb.expressions.operand import Path
from pynamodb.expressions.projection import create_projection_expression
from pynamodb.expressions.update import Update
from pynamodb.settings import get_settings_value
from pynamodb.signals import pre_dynamodb_send, post_dynamodb_send
from pynamodb.types import HASH, RANGE
BOTOCORE_EXCEPTIONS = (BotoCoreError, ClientError)
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class MetaTable(object):
def __init__(self, data):
self.data = data or {}
self._range_keyname = None
self._hash_keyname = None
def __repr__(self):
if self.data:
return six.u("MetaTable<{0}>".format(self.data.get(TABLE_NAME)))
@property
def range_keyname(self):
if self._range_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == RANGE:
self._range_keyname = attr.get(ATTR_NAME)
return self._range_keyname
@property
def hash_keyname(self):
if self._hash_keyname is None:
for attr in self.data.get(KEY_SCHEMA):
if attr.get(KEY_TYPE) == HASH:
self._hash_keyname = attr.get(ATTR_NAME)
break
return self._hash_keyname
def get_key_names(self, index_name=None):
key_names = [self.hash_keyname]
if self.range_keyname:
key_names.append(self.range_keyname)
if index_name is not None:
index_hash_keyname = self.get_index_hash_keyname(index_name)
if index_hash_keyname not in key_names:
key_names.append(index_hash_keyname)
index_range_keyname = self.get_index_range_keyname(index_name)
if index_range_keyname is not None and index_range_keyname not in key_names:
key_names.append(index_range_keyname)
return key_names
def get_index_hash_keyname(self, index_name):
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == HASH:
return schema_key.get(ATTR_NAME)
def get_index_range_keyname(self, index_name):
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == RANGE:
return schema_key.get(ATTR_NAME)
return None
def get_item_attribute_map(self, attributes, item_key=ITEM, pythonic_key=True):
if pythonic_key:
item_key = item_key
attr_map = {
item_key: {}
}
for key, value in attributes.items():
if isinstance(value, dict):
attr_map[item_key][key] = value
else:
attr_map[item_key][key] = {
self.get_attribute_type(key): value
}
return attr_map
def get_attribute_type(self, attribute_name, value=None):
for attr in self.data.get(ATTR_DEFINITIONS):
if attr.get(ATTR_NAME) == attribute_name:
return attr.get(ATTR_TYPE)
if value is not None and isinstance(value, dict):
for key in SHORT_ATTR_TYPES:
if key in value:
return key
attr_names = [attr.get(ATTR_NAME) for attr in self.data.get(ATTR_DEFINITIONS)]
raise ValueError("No attribute {0} in {1}".format(attribute_name, attr_names))
def get_identifier_map(self, hash_key, range_key=None, key=KEY):
kwargs = {
key: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): hash_key
}
}
}
if range_key is not None:
kwargs[key][self.range_keyname] = {
self.get_attribute_type(self.range_keyname): range_key
}
return kwargs
def get_exclusive_start_key_map(self, exclusive_start_key):
if isinstance(exclusive_start_key, dict) and self.hash_keyname in exclusive_start_key:
return {
EXCLUSIVE_START_KEY: exclusive_start_key
}
else:
return {
EXCLUSIVE_START_KEY: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): exclusive_start_key
}
}
}
class Connection(object):
def __init__(self, region=None, host=None, session_cls=None,
request_timeout_seconds=None, max_retry_attempts=None, base_backoff_ms=None):
self._tables = {}
self.host = host
self._local = local()
self._requests_session = None
self._client = None
if region:
self.region = region
else:
self.region = get_settings_value('region')
if session_cls:
self.session_cls = session_cls
else:
self.session_cls = get_settings_value('session_cls')
if request_timeout_seconds is not None:
self._request_timeout_seconds = request_timeout_seconds
else:
self._request_timeout_seconds = get_settings_value('request_timeout_seconds')
if max_retry_attempts is not None:
self._max_retry_attempts_exception = max_retry_attempts
else:
self._max_retry_attempts_exception = get_settings_value('max_retry_attempts')
if base_backoff_ms is not None:
self._base_backoff_ms = base_backoff_ms
else:
self._base_backoff_ms = get_settings_value('base_backoff_ms')
def __repr__(self):
return six.u("Connection<{0}>".format(self.client.meta.endpoint_url))
def _log_debug(self, operation, kwargs):
log.debug("Calling %s with arguments %s", operation, kwargs)
def _log_debug_response(self, operation, response):
log.debug("%s response: %s", operation, response)
def _log_error(self, operation, response):
log.error("%s failed with status: %s, message: %s",
                  operation, response.status_code, response.content)
def _create_prepared_request(self, request_dict, operation_model):
boto_prepared_request = self.client._endpoint.create_request(request_dict, operation_model)
raw_request_with_params = Request(
boto_prepared_request.method,
boto_prepared_request.url,
data=boto_prepared_request.body,
headers=boto_prepared_request.headers
)
return self.requests_session.prepare_request(raw_request_with_params)
def dispatch(self, operation_name, operation_kwargs):
if operation_name not in [DESCRIBE_TABLE, LIST_TABLES, UPDATE_TABLE, DELETE_TABLE, CREATE_TABLE]:
if RETURN_CONSUMED_CAPACITY not in operation_kwargs:
operation_kwargs.update(self.get_consumed_capacity_map(TOTAL))
self._log_debug(operation_name, operation_kwargs)
table_name = operation_kwargs.get(TABLE_NAME)
req_uuid = uuid.uuid4()
self.send_pre_boto_callback(operation_name, req_uuid, table_name)
data = self._make_api_call(operation_name, operation_kwargs)
self.send_post_boto_callback(operation_name, req_uuid, table_name)
if data and CONSUMED_CAPACITY in data:
capacity = data.get(CONSUMED_CAPACITY)
if isinstance(capacity, dict) and CAPACITY_UNITS in capacity:
capacity = capacity.get(CAPACITY_UNITS)
log.debug("%s %s consumed %s units", data.get(TABLE_NAME, ''), operation_name, capacity)
return data
def send_post_boto_callback(self, operation_name, req_uuid, table_name):
try:
post_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("post_boto callback threw an exception.")
def send_pre_boto_callback(self, operation_name, req_uuid, table_name):
try:
pre_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("pre_boto callback threw an exception.")
def _make_api_call(self, operation_name, operation_kwargs):
operation_model = self.client._service_model.operation_model(operation_name)
request_dict = self.client._convert_to_request_dict(
operation_kwargs,
operation_model
)
prepared_request = self._create_prepared_request(request_dict, operation_model)
for i in range(0, self._max_retry_attempts_exception + 1):
attempt_number = i + 1
is_last_attempt_for_exceptions = i == self._max_retry_attempts_exception
try:
response = self.requests_session.send(
prepared_request,
timeout=self._request_timeout_seconds,
proxies=self.client._endpoint.proxies,
)
data = response.json()
except (requests.RequestException, ValueError) as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
else:
log.debug(
'Retry needed for (%s) after attempt %s, retryable %s caught: %s',
operation_name,
attempt_number,
e.__class__.__name__,
e
)
continue
if response.status_code >= 300:
code = data.get('__type', '')
if '#' in code:
code = code.rsplit('#', 1)[1]
botocore_expected_format = {'Error': {'Message': data.get('message', ''), 'Code': code}}
verbose_properties = {
'request_id': response.headers.get('x-amzn-RequestId')
}
if 'RequestItems' in operation_kwargs:
verbose_properties['table_name'] = ','.join(operation_kwargs['RequestItems'])
else:
verbose_properties['table_name'] = operation_kwargs.get('TableName')
try:
raise VerboseClientError(botocore_expected_format, operation_name, verbose_properties)
except VerboseClientError as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
elif response.status_code < 500 and code != 'ProvisionedThroughputExceededException':
# throughput related errors) because we assume they will fail in perpetuity.
# Retrying when there is already contention could cause other problems
# in part due to unnecessary consumption of throughput.
raise
else:
# We use fully-jittered exponentially-backed-off retries:
# https://www.awsarchitectureblog.com/2015/03/backoff.html
sleep_time_ms = random.randint(0, self._base_backoff_ms * (2 ** i))
log.debug(
                            'Retry with backoff needed for (%s) after attempt %s, '
'sleeping for %s milliseconds, retryable %s caught: %s',
operation_name,
attempt_number,
sleep_time_ms,
e.__class__.__name__,
e
)
time.sleep(sleep_time_ms / 1000.0)
continue
return self._handle_binary_attributes(data)
@staticmethod
def _handle_binary_attributes(data):
if ITEM in data:
for attr in six.itervalues(data[ITEM]):
_convert_binary(attr)
if ITEMS in data:
for item in data[ITEMS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if RESPONSES in data:
for item_list in six.itervalues(data[RESPONSES]):
for item in item_list:
for attr in six.itervalues(item):
_convert_binary(attr)
if LAST_EVALUATED_KEY in data:
for attr in six.itervalues(data[LAST_EVALUATED_KEY]):
_convert_binary(attr)
if UNPROCESSED_KEYS in data:
for table_data in six.itervalues(data[UNPROCESSED_KEYS]):
for item in table_data[KEYS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if UNPROCESSED_ITEMS in data:
for table_unprocessed_requests in six.itervalues(data[UNPROCESSED_ITEMS]):
for request in table_unprocessed_requests:
for item_mapping in six.itervalues(request):
for item in six.itervalues(item_mapping):
for attr in six.itervalues(item):
_convert_binary(attr)
if ATTRIBUTES in data:
for attr in six.itervalues(data[ATTRIBUTES]):
_convert_binary(attr)
return data
@property
def session(self):
# botocore client creation is not thread safe as of v1.2.5+ (see issue #153)
if getattr(self._local, 'session', None) is None:
self._local.session = get_session()
return self._local.session
@property
def requests_session(self):
if self._requests_session is None:
self._requests_session = self.session_cls()
return self._requests_session
@property
def client(self):
# botocore has a known issue where it will cache empty credentials
# https://github.com/boto/botocore/blob/4d55c9b4142/botocore/credentials.py#L1016-L1021
# if the client does not have credentials, we create a new client
# otherwise the client is permanently poisoned in the case of metadata service flakiness when using IAM roles
if not self._client or (self._client._request_signer and not self._client._request_signer._credentials):
self._client = self.session.create_client(SERVICE_NAME, self.region, endpoint_url=self.host)
return self._client
def get_meta_table(self, table_name, refresh=False):
if table_name not in self._tables or refresh:
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DESCRIBE_TABLE, operation_kwargs)
self._tables[table_name] = MetaTable(data.get(TABLE_KEY))
except BotoCoreError as e:
raise TableError("Unable to describe table: {0}".format(e), e)
except ClientError as e:
if 'ResourceNotFound' in e.response['Error']['Code']:
raise TableDoesNotExist(e.response['Error']['Message'])
else:
raise
return self._tables[table_name]
def create_table(self,
table_name,
attribute_definitions=None,
key_schema=None,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_indexes=None,
local_secondary_indexes=None,
stream_specification=None):
operation_kwargs = {
TABLE_NAME: table_name,
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
}
attrs_list = []
if attribute_definitions is None:
raise ValueError("attribute_definitions argument is required")
for attr in attribute_definitions:
attrs_list.append({
ATTR_NAME: attr.get(pythonic(ATTR_NAME)),
ATTR_TYPE: attr.get(pythonic(ATTR_TYPE))
})
operation_kwargs[ATTR_DEFINITIONS] = attrs_list
if global_secondary_indexes:
global_secondary_indexes_list = []
for index in global_secondary_indexes:
global_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
PROVISIONED_THROUGHPUT: index.get(pythonic(PROVISIONED_THROUGHPUT))
})
operation_kwargs[GLOBAL_SECONDARY_INDEXES] = global_secondary_indexes_list
if key_schema is None:
raise ValueError("key_schema is required")
key_schema_list = []
for item in key_schema:
key_schema_list.append({
ATTR_NAME: item.get(pythonic(ATTR_NAME)),
KEY_TYPE: str(item.get(pythonic(KEY_TYPE))).upper()
})
operation_kwargs[KEY_SCHEMA] = sorted(key_schema_list, key=lambda x: x.get(KEY_TYPE))
local_secondary_indexes_list = []
if local_secondary_indexes:
for index in local_secondary_indexes:
local_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
})
operation_kwargs[LOCAL_SECONDARY_INDEXES] = local_secondary_indexes_list
if stream_specification:
operation_kwargs[STREAM_SPECIFICATION] = {
STREAM_ENABLED: stream_specification[pythonic(STREAM_ENABLED)],
STREAM_VIEW_TYPE: stream_specification[pythonic(STREAM_VIEW_TYPE)]
}
try:
data = self.dispatch(CREATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to create table: {0}".format(e), e)
return data
def delete_table(self, table_name):
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DELETE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to delete table: {0}".format(e), e)
return data
def update_table(self,
table_name,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_index_updates=None):
operation_kwargs = {
TABLE_NAME: table_name
}
if read_capacity_units and not write_capacity_units or write_capacity_units and not read_capacity_units:
raise ValueError("read_capacity_units and write_capacity_units are required together")
if read_capacity_units and write_capacity_units:
operation_kwargs[PROVISIONED_THROUGHPUT] = {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
if global_secondary_index_updates:
global_secondary_indexes_list = []
for index in global_secondary_index_updates:
global_secondary_indexes_list.append({
UPDATE: {
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: index.get(pythonic(READ_CAPACITY_UNITS)),
WRITE_CAPACITY_UNITS: index.get(pythonic(WRITE_CAPACITY_UNITS))
}
}
})
operation_kwargs[GLOBAL_SECONDARY_INDEX_UPDATES] = global_secondary_indexes_list
try:
return self.dispatch(UPDATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to update table: {0}".format(e), e)
def list_tables(self, exclusive_start_table_name=None, limit=None):
operation_kwargs = {}
if exclusive_start_table_name:
operation_kwargs.update({
EXCLUSIVE_START_TABLE_NAME: exclusive_start_table_name
})
if limit is not None:
operation_kwargs.update({
LIMIT: limit
})
try:
return self.dispatch(LIST_TABLES, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Unable to list tables: {0}".format(e), e)
def describe_table(self, table_name):
try:
tbl = self.get_meta_table(table_name, refresh=True)
if tbl:
return tbl.data
except ValueError:
pass
raise TableDoesNotExist(table_name)
def get_conditional_operator(self, operator):
operator = operator.upper()
if operator not in CONDITIONAL_OPERATORS:
raise ValueError(
"The {0} must be one of {1}".format(
CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS
)
)
return {
CONDITIONAL_OPERATOR: operator
}
def get_item_attribute_map(self, table_name, attributes, item_key=ITEM, pythonic_key=True):
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_item_attribute_map(
attributes,
item_key=item_key,
pythonic_key=pythonic_key)
def get_expected_map(self, table_name, expected):
kwargs = {EXPECTED: {}}
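        # `expected` uses the legacy conditional syntax, e.g. (illustrative values):
        #   {'forum_name': {'Exists': False}}
        #   {'status': {'Value': 'active'}}
        #   {'views': {'ComparisonOperator': 'GT', 'AttributeValueList': [5]}}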
for key, condition in expected.items():
if EXISTS in condition:
kwargs[EXPECTED][key] = {
EXISTS: condition.get(EXISTS)
}
elif VALUE in condition:
kwargs[EXPECTED][key] = {
VALUE: {
self.get_attribute_type(table_name, key): condition.get(VALUE)
}
}
elif COMPARISON_OPERATOR in condition:
kwargs[EXPECTED][key] = {
COMPARISON_OPERATOR: condition.get(COMPARISON_OPERATOR),
}
values = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_type = self.get_attribute_type(table_name, key, value)
values.append({attr_type: self.parse_attribute(value)})
if condition.get(COMPARISON_OPERATOR) not in [NULL, NOT_NULL]:
kwargs[EXPECTED][key][ATTR_VALUE_LIST] = values
return kwargs
def parse_attribute(self, attribute, return_type=False):
if isinstance(attribute, dict):
for key in SHORT_ATTR_TYPES:
if key in attribute:
if return_type:
return key, attribute.get(key)
return attribute.get(key)
raise ValueError("Invalid attribute supplied: {0}".format(attribute))
else:
if return_type:
return None, attribute
return attribute
def get_attribute_type(self, table_name, attribute_name, value=None):
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_attribute_type(attribute_name, value=value)
def get_identifier_map(self, table_name, hash_key, range_key=None, key=KEY):
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_identifier_map(hash_key, range_key=range_key, key=key)
def get_query_filter_map(self, table_name, query_filters):
kwargs = {
QUERY_FILTER: {}
}
for key, condition in query_filters.items():
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
attr_value_list = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_value_list.append({
self.get_attribute_type(table_name, key, value): self.parse_attribute(value)
})
kwargs[QUERY_FILTER][key] = {
COMPARISON_OPERATOR: operator
}
if len(attr_value_list):
kwargs[QUERY_FILTER][key][ATTR_VALUE_LIST] = attr_value_list
return kwargs
def get_consumed_capacity_map(self, return_consumed_capacity):
if return_consumed_capacity.upper() not in RETURN_CONSUMED_CAPACITY_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY_VALUES))
return {
RETURN_CONSUMED_CAPACITY: str(return_consumed_capacity).upper()
}
def get_return_values_map(self, return_values):
if return_values.upper() not in RETURN_VALUES_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_VALUES, RETURN_VALUES_VALUES))
return {
RETURN_VALUES: str(return_values).upper()
}
def get_item_collection_map(self, return_item_collection_metrics):
if return_item_collection_metrics.upper() not in RETURN_ITEM_COLL_METRICS_VALUES:
raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_ITEM_COLL_METRICS_VALUES))
return {
RETURN_ITEM_COLL_METRICS: str(return_item_collection_metrics).upper()
}
def get_exclusive_start_key_map(self, table_name, exclusive_start_key):
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_exclusive_start_key_map(exclusive_start_key)
def delete_item(self,
table_name,
hash_key,
range_key=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(DELETE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise DeleteError("Failed to delete item: {0}".format(e), e)
def update_item(self,
table_name,
hash_key,
range_key=None,
actions=None,
attribute_updates=None,
condition=None,
expected=None,
return_consumed_capacity=None,
conditional_operator=None,
return_item_collection_metrics=None,
return_values=None):
self._check_actions(actions, attribute_updates)
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
name_placeholders = {}
expression_attribute_values = {}
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
if not actions and not attribute_updates:
raise ValueError("{0} cannot be empty".format(ATTR_UPDATES))
actions = actions or []
attribute_updates = attribute_updates or {}
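        # Legacy `attribute_updates` entries look like (illustrative values):
        #   {'views': {'Action': 'PUT', 'Value': 10}}
        # and are translated below into equivalent update expression actions.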
update_expression = Update(*actions)
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(attribute_updates.keys()):
path = Path([key])
update = attribute_updates[key]
action = update.get(ACTION)
if action not in ATTR_UPDATE_ACTIONS:
raise ValueError("{0} must be one of {1}".format(ACTION, ATTR_UPDATE_ACTIONS))
value = update.get(VALUE)
attr_type, value = self.parse_attribute(value, return_type=True)
if attr_type is None and action != DELETE:
attr_type = self.get_attribute_type(table_name, key, value)
value = {attr_type: value}
if action == DELETE:
action = path.remove() if attr_type is None else path.delete(value)
elif action == PUT:
action = path.set(value)
else:
action = path.add(value)
update_expression.add_action(action)
operation_kwargs[UPDATE_EXPRESSION] = update_expression.serialize(name_placeholders, expression_attribute_values)
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(UPDATE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise UpdateError("Failed to update item: {0}".format(e), e)
def put_item(self,
table_name,
hash_key,
range_key=None,
attributes=None,
condition=None,
expected=None,
conditional_operator=None,
return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
self._check_condition('condition', condition, expected, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key, key=ITEM))
name_placeholders = {}
expression_attribute_values = {}
if attributes:
attrs = self.get_item_attribute_map(table_name, attributes)
operation_kwargs[ITEM].update(attrs[ITEM])
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if return_values:
operation_kwargs.update(self.get_return_values_map(return_values))
# We read the conditional operator even without expected passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if expected:
condition_expression = self._get_condition_expression(
table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(PUT_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to put item: {0}".format(e), e)
def batch_write_item(self,
table_name,
put_items=None,
delete_items=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
if put_items is None and delete_items is None:
raise ValueError("Either put_items or delete_items must be specified")
operation_kwargs = {
REQUEST_ITEMS: {
table_name: []
}
}
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
put_items_list = []
if put_items:
for item in put_items:
put_items_list.append({
PUT_REQUEST: self.get_item_attribute_map(table_name, item, pythonic_key=False)
})
delete_items_list = []
if delete_items:
for item in delete_items:
delete_items_list.append({
DELETE_REQUEST: self.get_item_attribute_map(table_name, item, item_key=KEY, pythonic_key=False)
})
operation_kwargs[REQUEST_ITEMS][table_name] = delete_items_list + put_items_list
try:
return self.dispatch(BATCH_WRITE_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to batch write items: {0}".format(e), e)
def batch_get_item(self,
table_name,
keys,
consistent_read=None,
return_consumed_capacity=None,
attributes_to_get=None):
operation_kwargs = {
REQUEST_ITEMS: {
table_name: {}
}
}
args_map = {}
name_placeholders = {}
if consistent_read:
args_map[CONSISTENT_READ] = consistent_read
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
args_map[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
args_map[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[REQUEST_ITEMS][table_name].update(args_map)
keys_map = {KEYS: []}
for key in keys:
keys_map[KEYS].append(
self.get_item_attribute_map(table_name, key)[ITEM]
)
operation_kwargs[REQUEST_ITEMS][table_name].update(keys_map)
try:
return self.dispatch(BATCH_GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to batch get items: {0}".format(e), e)
def get_item(self,
table_name,
hash_key,
range_key=None,
consistent_read=False,
attributes_to_get=None):
operation_kwargs = {}
name_placeholders = {}
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
operation_kwargs[CONSISTENT_READ] = consistent_read
operation_kwargs[TABLE_NAME] = table_name
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
try:
return self.dispatch(GET_ITEM, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise GetError("Failed to get item: {0}".format(e), e)
def rate_limited_scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
page_size=None,
limit=None,
conditional_operator=None,
scan_filter=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
timeout_seconds=None,
read_capacity_to_consume_per_second=10,
allow_rate_limited_scan_without_consumed_capacity=None,
max_sleep_between_retry=10,
max_consecutive_exceptions=10,
consistent_read=None,
index_name=None):
read_capacity_to_consume_per_ms = float(read_capacity_to_consume_per_second) / 1000
if allow_rate_limited_scan_without_consumed_capacity is None:
allow_rate_limited_scan_without_consumed_capacity = get_settings_value(
'allow_rate_limited_scan_without_consumed_capacity'
)
total_consumed_read_capacity = 0.0
last_evaluated_key = exclusive_start_key
rate_available = True
latest_scan_consumed_capacity = 0
consecutive_provision_throughput_exceeded_ex = 0
start_time = time.time()
if page_size is None:
if limit and read_capacity_to_consume_per_second > limit:
page_size = limit
else:
page_size = read_capacity_to_consume_per_second
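        # Scan one page at a time, tracking consumed read capacity against elapsed
        # time; when consumption runs ahead of the requested rate (or throughput is
        # exceeded), sleep before issuing the next page.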
while True:
if rate_available:
try:
data = self.scan(
table_name,
filter_condition=filter_condition,
attributes_to_get=attributes_to_get,
exclusive_start_key=last_evaluated_key,
limit=page_size,
conditional_operator=conditional_operator,
return_consumed_capacity=TOTAL,
scan_filter=scan_filter,
segment=segment,
total_segments=total_segments,
consistent_read=consistent_read,
index_name=index_name
)
for item in data.get(ITEMS):
yield item
if limit is not None:
limit -= 1
if not limit:
return
if CONSUMED_CAPACITY in data:
latest_scan_consumed_capacity = data.get(CONSUMED_CAPACITY).get(CAPACITY_UNITS)
else:
if allow_rate_limited_scan_without_consumed_capacity:
latest_scan_consumed_capacity = 0
else:
                            raise ScanError('Rate limited scan not possible because the server did not send back '
                                            'consumed capacity information. If you wish scans to complete anyway '
                                            'without functioning rate limiting, set '
                                            'allow_rate_limited_scan_without_consumed_capacity to True in settings.')
last_evaluated_key = data.get(LAST_EVALUATED_KEY, None)
consecutive_provision_throughput_exceeded_ex = 0
except ScanError as e:
# Only retry if provision throughput is exceeded.
if isinstance(e.cause, ClientError):
code = e.cause.response['Error'].get('Code')
if code == "ProvisionedThroughputExceededException":
consecutive_provision_throughput_exceeded_ex += 1
if consecutive_provision_throughput_exceeded_ex > max_consecutive_exceptions:
# Max threshold reached
raise
else:
# Different exception, other than ProvisionedThroughputExceededException
raise
else:
# Not a Client error
raise
# No throttling, and no more scans needed. Just return
if not last_evaluated_key and consecutive_provision_throughput_exceeded_ex == 0:
return
current_time = time.time()
# elapsed_time_ms indicates the time taken in ms from the start of the
            # rate_limited_scan call.
elapsed_time_ms = max(1, round((current_time - start_time) * 1000))
if consecutive_provision_throughput_exceeded_ex == 0:
total_consumed_read_capacity += latest_scan_consumed_capacity
consumed_rate = total_consumed_read_capacity / elapsed_time_ms
rate_available = (read_capacity_to_consume_per_ms - consumed_rate) >= 0
# consecutive_provision_throughput_exceeded_ex > 0 indicates ProvisionedThroughputExceededException occurred.
# ProvisionedThroughputExceededException can occur if:
# - The rate to consume is passed incorrectly.
# - External factors, even if the current scan is within limits.
if not rate_available or (consecutive_provision_throughput_exceeded_ex > 0):
# Minimum value is 1 second.
elapsed_time_s = math.ceil(elapsed_time_ms / 1000)
# Sleep proportional to the ratio of --consumed capacity-- to --capacity to consume--
                time_to_sleep = max(1, round((total_consumed_read_capacity / elapsed_time_s)
                                             / read_capacity_to_consume_per_second))
# At any moment if the timeout_seconds hits, then return
if timeout_seconds and (elapsed_time_s + time_to_sleep) > timeout_seconds:
raise ScanError("Input timeout value {0} has expired".format(timeout_seconds))
time.sleep(min(math.ceil(time_to_sleep), max_sleep_between_retry))
# Reset the latest_scan_consumed_capacity, as no scan operation was performed.
latest_scan_consumed_capacity = 0
def scan(self,
table_name,
filter_condition=None,
attributes_to_get=None,
limit=None,
conditional_operator=None,
scan_filter=None,
return_consumed_capacity=None,
exclusive_start_key=None,
segment=None,
total_segments=None,
consistent_read=None,
index_name=None):
self._check_condition('filter_condition', filter_condition, scan_filter, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if segment is not None:
operation_kwargs[SEGMENT] = segment
if total_segments:
operation_kwargs[TOTAL_SEGMENTS] = total_segments
if scan_filter:
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
filter_expression = self._get_filter_expression(
table_name, scan_filter, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = consistent_read
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(SCAN, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise ScanError("Failed to scan table: {0}".format(e), e)
def query(self,
table_name,
hash_key,
range_key_condition=None,
filter_condition=None,
attributes_to_get=None,
consistent_read=False,
exclusive_start_key=None,
index_name=None,
key_conditions=None,
query_filters=None,
conditional_operator=None,
limit=None,
return_consumed_capacity=None,
scan_index_forward=None,
select=None):
self._check_condition('range_key_condition', range_key_condition, key_conditions, conditional_operator)
self._check_condition('filter_condition', filter_condition, query_filters, conditional_operator)
operation_kwargs = {TABLE_NAME: table_name}
name_placeholders = {}
expression_attribute_values = {}
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table: {0}".format(table_name))
if index_name:
hash_keyname = tbl.get_index_hash_keyname(index_name)
if not hash_keyname:
raise ValueError("No hash key attribute for index: {0}".format(index_name))
range_keyname = tbl.get_index_range_keyname(index_name)
else:
hash_keyname = tbl.hash_keyname
range_keyname = tbl.range_keyname
key_condition = self._get_condition(table_name, hash_keyname, '__eq__', hash_key)
if range_key_condition is not None:
if range_key_condition.is_valid_range_key_condition(range_keyname):
key_condition = key_condition & range_key_condition
elif filter_condition is None:
# Try to gracefully handle the case where a user passed in a filter as a range key condition
(filter_condition, range_key_condition) = (range_key_condition, None)
else:
raise ValueError("{0} is not a valid range key condition".format(range_key_condition))
if key_conditions is None or len(key_conditions) == 0:
pass # No comparisons on sort key
elif len(key_conditions) > 1:
raise ValueError("Multiple attributes are not supported in key_conditions: {0}".format(key_conditions))
else:
(key, condition), = key_conditions.items()
operator = condition.get(COMPARISON_OPERATOR)
if operator not in COMPARISON_OPERATOR_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, COMPARISON_OPERATOR_VALUES))
operator = KEY_CONDITION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST)
sort_key_expression = self._get_condition(table_name, key, operator, *values)
key_condition = key_condition & sort_key_expression
operation_kwargs[KEY_CONDITION_EXPRESSION] = key_condition.serialize(
name_placeholders, expression_attribute_values)
if filter_condition is not None:
filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
# FilterExpression does not allow key attributes. Check for hash and range key name placeholders
hash_key_placeholder = name_placeholders.get(hash_keyname)
range_key_placeholder = range_keyname and name_placeholders.get(range_keyname)
if (
hash_key_placeholder in filter_expression or
(range_key_placeholder and range_key_placeholder in filter_expression)
):
raise ValueError("'filter_condition' cannot contain key attributes")
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if attributes_to_get:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if consistent_read:
operation_kwargs[CONSISTENT_READ] = True
if exclusive_start_key:
operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
if index_name:
operation_kwargs[INDEX_NAME] = index_name
if limit is not None:
operation_kwargs[LIMIT] = limit
if return_consumed_capacity:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
# We read the conditional operator even without a query filter passed in to maintain existing behavior.
conditional_operator = self.get_conditional_operator(conditional_operator or AND)
if query_filters:
filter_expression = self._get_filter_expression(
table_name, query_filters, conditional_operator, name_placeholders, expression_attribute_values)
operation_kwargs[FILTER_EXPRESSION] = filter_expression
if select:
if select.upper() not in SELECT_VALUES:
raise ValueError("{0} must be one of {1}".format(SELECT, SELECT_VALUES))
operation_kwargs[SELECT] = str(select).upper()
if scan_index_forward is not None:
operation_kwargs[SCAN_INDEX_FORWARD] = scan_index_forward
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
try:
return self.dispatch(QUERY, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise QueryError("Failed to query items: {0}".format(e), e)
def _get_condition_expression(self, table_name, expected, conditional_operator,
name_placeholders, expression_attribute_values):
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(expected.keys()):
condition = expected[key]
if EXISTS in condition:
operator = NOT_NULL if condition.get(EXISTS, True) else NULL
values = []
elif VALUE in condition:
operator = EQ
values = [condition.get(VALUE)]
else:
operator = condition.get(COMPARISON_OPERATOR)
values = condition.get(ATTR_VALUE_LIST, [])
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
def _get_filter_expression(self, table_name, filters, conditional_operator,
name_placeholders, expression_attribute_values):
condition_expression = None
conditional_operator = conditional_operator[CONDITIONAL_OPERATOR]
# We sort the keys here for determinism. This is mostly done to simplify testing.
for key in sorted(filters.keys()):
condition = filters[key]
operator = condition.get(COMPARISON_OPERATOR)
if operator not in QUERY_FILTER_VALUES:
raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
not_contains = operator == NOT_CONTAINS
operator = FILTER_EXPRESSION_OPERATOR_MAP[operator]
values = condition.get(ATTR_VALUE_LIST, [])
condition = self._get_condition(table_name, key, operator, *values)
if not_contains:
condition = ~condition
if condition_expression is None:
condition_expression = condition
elif conditional_operator == AND:
condition_expression = condition_expression & condition
else:
condition_expression = condition_expression | condition
return condition_expression.serialize(name_placeholders, expression_attribute_values)
def _get_condition(self, table_name, attribute_name, operator, *values):
values = [
{self.get_attribute_type(table_name, attribute_name, value): self.parse_attribute(value)}
for value in values
]
return getattr(Path([attribute_name]), operator)(*values)
def _check_actions(self, actions, attribute_updates):
if actions is not None:
if attribute_updates is not None:
raise ValueError("Legacy attribute updates cannot be used with update actions")
else:
if attribute_updates is not None:
warnings.warn("Legacy attribute updates are deprecated in favor of update actions")
def _check_condition(self, name, condition, expected_or_filter, conditional_operator):
if condition is not None:
if not isinstance(condition, Condition):
raise ValueError("'{0}' must be an instance of Condition".format(name))
if expected_or_filter or conditional_operator is not None:
raise ValueError("Legacy conditional parameters cannot be used with condition expressions")
else:
if expected_or_filter or conditional_operator is not None:
warnings.warn("Legacy conditional parameters are deprecated in favor of condition expressions")
@staticmethod
def _reverse_dict(d):
return dict((v, k) for k, v in six.iteritems(d))
def _convert_binary(attr):
if BINARY_SHORT in attr:
attr[BINARY_SHORT] = b64decode(attr[BINARY_SHORT].encode(DEFAULT_ENCODING))
elif BINARY_SET_SHORT in attr:
value = attr[BINARY_SET_SHORT]
if value and len(value):
attr[BINARY_SET_SHORT] = set(b64decode(v.encode(DEFAULT_ENCODING)) for v in value)
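# --- Illustrative usage sketch (not part of the original module) ---
# Assuming the class whose methods are defined above is exposed as `Connection`
# (its class statement falls outside this excerpt), a low-level call sequence
# might look like:
#
#   conn = Connection(region='us-east-1')
#   conn.put_item('Thread', hash_key='forum', range_key='subject',
#                 attributes={'views': {'N': '0'}})
#   item = conn.get_item('Thread', hash_key='forum', range_key='subject')
#   page = conn.scan('Thread', limit=25)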
| true
| true
|
7906d553d5dac4011032f2bf891deac7b7498d0d
| 8,244
|
py
|
Python
|
tests/e2e/performance/csi_tests/test_bulk_pod_attachtime_performance.py
|
srivickynesh/ocs-ci
|
994b8635a2f44ec7982585cfb293215aa8b27d2a
|
[
"MIT"
] | null | null | null |
tests/e2e/performance/csi_tests/test_bulk_pod_attachtime_performance.py
|
srivickynesh/ocs-ci
|
994b8635a2f44ec7982585cfb293215aa8b27d2a
|
[
"MIT"
] | null | null | null |
tests/e2e/performance/csi_tests/test_bulk_pod_attachtime_performance.py
|
srivickynesh/ocs-ci
|
994b8635a2f44ec7982585cfb293215aa8b27d2a
|
[
"MIT"
] | null | null | null |
"""
Test to verify the performance of attaching a number of pods as a bulk, each pod attached to one PVC only
The test results will be uploaded to the ES server
"""
import logging
import os
import pytest
import pathlib
import time
from concurrent.futures import ThreadPoolExecutor
from ocs_ci.framework.testlib import performance, polarion_id
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.ocs import defaults, constants, scale_lib
from ocs_ci.ocs.resources.pod import get_pod_obj
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.ocs.perfresult import ResultsAnalyse
from ocs_ci.ocs.resources.objectconfigfile import ObjectConfFile
from ocs_ci.utility.utils import ocsci_log_path
log = logging.getLogger(__name__)
@performance
class TestBulkPodAttachPerformance(PASTest):
"""
Test to measure performance of attaching pods to pvc in a bulk
"""
pvc_size = "1Gi"
def setup(self):
"""
Setting up test parameters
"""
log.info("Starting the test setup")
super(TestBulkPodAttachPerformance, self).setup()
self.benchmark_name = "bulk_pod_attach_time"
        # Pull the pod image to the worker nodes up front, so that image pull time
        # is not counted as part of the total attach time
helpers.pull_images(constants.PERF_IMAGE)
@pytest.fixture()
def base_setup(self, project_factory, interface_type, storageclass_factory):
"""
A setup phase for the test
Args:
interface_type: Interface type
storageclass_factory: A fixture to create everything needed for a storage class
"""
self.interface = interface_type
self.sc_obj = storageclass_factory(self.interface)
proj_obj = project_factory()
self.namespace = proj_obj.namespace
if self.interface == constants.CEPHFILESYSTEM:
self.sc = "CephFS"
if self.interface == constants.CEPHBLOCKPOOL:
self.sc = "RBD"
@pytest.mark.parametrize(
argnames=["interface_type", "bulk_size"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, 120],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, 240],
),
pytest.param(
*[constants.CEPHFILESYSTEM, 120],
),
pytest.param(
*[constants.CEPHFILESYSTEM, 240],
),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
@polarion_id("OCS-1620")
def test_bulk_pod_attach_performance(self, teardown_factory, bulk_size):
"""
        Measures pod attach time for a bulk of `bulk_size` pods
Args:
teardown_factory: A fixture used when we want a new resource that was created during the tests
to be removed in the teardown phase.
bulk_size: Size of the bulk to be tested
Returns:
"""
# Getting the test start time
test_start_time = PASTest.get_time()
log.info(f"Start creating bulk of new {bulk_size} PVCs")
pvc_objs, _ = helpers.create_multiple_pvcs(
sc_name=self.sc_obj.name,
namespace=self.namespace,
number_of_pvc=bulk_size,
size=self.pvc_size,
burst=True,
)
for pvc_obj in pvc_objs:
pvc_obj.reload()
teardown_factory(pvc_obj)
with ThreadPoolExecutor(max_workers=5) as executor:
for pvc_obj in pvc_objs:
executor.submit(
helpers.wait_for_resource_state, pvc_obj, constants.STATUS_BOUND
)
executor.submit(pvc_obj.reload)
start_time = helpers.get_provision_time(
self.interface, pvc_objs, status="start"
)
end_time = helpers.get_provision_time(self.interface, pvc_objs, status="end")
total_time = (end_time - start_time).total_seconds()
log.info(
f"{self.interface}: Bulk of {bulk_size} PVCs creation time is {total_time} seconds."
)
pvc_names_list = []
for pvc_obj in pvc_objs:
pvc_names_list.append(pvc_obj.name)
log.info(f"{self.interface} : Before pod attach")
bulk_start_time = time.time()
pod_data_list = list()
pod_data_list.extend(
scale_lib.attach_multiple_pvc_to_pod_dict(
pvc_list=pvc_names_list,
namespace=self.namespace,
pvcs_per_pod=1,
)
)
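        # The kube job object is stashed in locals() under a dynamically built name
        # (pod_kube_obj1) so it can be looked up again by the same key below.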
lcl = locals()
tmp_path = pathlib.Path(ocsci_log_path())
obj_name = "obj1"
# Create kube_job for pod creation
lcl[f"pod_kube_{obj_name}"] = ObjectConfFile(
name=f"pod_kube_{obj_name}",
obj_dict_list=pod_data_list,
project=defaults.ROOK_CLUSTER_NAMESPACE,
tmp_path=tmp_path,
)
lcl[f"pod_kube_{obj_name}"].create(namespace=self.namespace)
log.info("Checking that pods are running")
# Check all the PODs reached Running state
pod_running_list = scale_lib.check_all_pod_reached_running_state_in_kube_job(
kube_job_obj=lcl[f"pod_kube_{obj_name}"],
namespace=self.namespace,
no_of_pod=len(pod_data_list),
timeout=180,
)
for pod_name in pod_running_list:
pod_obj = get_pod_obj(pod_name, self.namespace)
teardown_factory(pod_obj)
bulk_end_time = time.time()
bulk_total_time = bulk_end_time - bulk_start_time
log.info(
f"Bulk attach time of {len(pod_running_list)} pods is {bulk_total_time} seconds"
)
# Collecting environment information
self.get_env_info()
# Initialize the results doc file.
full_log_path = get_full_test_logs_path(cname=self)
self.results_path = get_full_test_logs_path(cname=self)
full_log_path += f"-{self.sc}"
full_results = self.init_full_results(
ResultsAnalyse(
self.uuid, self.crd_data, full_log_path, "pod_bulk_attachtime"
)
)
full_results.add_key("storageclass", self.sc)
full_results.add_key("pod_bulk_attach_time", bulk_total_time)
full_results.add_key("pvc_size", self.pvc_size)
full_results.add_key("bulk_size", bulk_size)
# Getting the test end time
test_end_time = PASTest.get_time()
# Add the test time to the ES report
full_results.add_key(
"test_time", {"start": test_start_time, "end": test_end_time}
)
# Write the test results into the ES server
if full_results.es_write():
res_link = full_results.results_link()
# write the ES link to the test results in the test log.
log.info(f"The result can be found at : {res_link}")
        # Create a text file with the results of all subtests (4 - according to the parameters)
self.write_result_to_file(res_link)
def test_bulk_pod_attach_results(self):
"""
        This is not a real test - it only checks that the previous tests ran and finished as expected,
        and reports the full results (links in the ES) of the previous tests (4)
"""
self.number_of_tests = 4
self.results_path = get_full_test_logs_path(
cname=self, fname="test_bulk_pod_attach_performance"
)
self.results_file = os.path.join(self.results_path, "all_results.txt")
log.info(f"Check results in {self.results_file}")
self.check_tests_results()
self.push_to_dashboard(test_name="Bulk Pod Attach Time")
def init_full_results(self, full_results):
"""
Initialize the full results object which will send to the ES server
Args:
full_results (obj): an empty ResultsAnalyse object
Returns:
ResultsAnalyse (obj): the input object filled with data
"""
for key in self.environment:
full_results.add_key(key, self.environment[key])
full_results.add_key("index", full_results.new_index)
return full_results
| 34.207469
| 106
| 0.632703
|
import logging
import os
import pytest
import pathlib
import time
from concurrent.futures import ThreadPoolExecutor
from ocs_ci.framework.testlib import performance, polarion_id
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.ocs import defaults, constants, scale_lib
from ocs_ci.ocs.resources.pod import get_pod_obj
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.ocs.perfresult import ResultsAnalyse
from ocs_ci.ocs.resources.objectconfigfile import ObjectConfFile
from ocs_ci.utility.utils import ocsci_log_path
log = logging.getLogger(__name__)
@performance
class TestBulkPodAttachPerformance(PASTest):
pvc_size = "1Gi"
def setup(self):
log.info("Starting the test setup")
super(TestBulkPodAttachPerformance, self).setup()
self.benchmark_name = "bulk_pod_attach_time"
helpers.pull_images(constants.PERF_IMAGE)
@pytest.fixture()
def base_setup(self, project_factory, interface_type, storageclass_factory):
self.interface = interface_type
self.sc_obj = storageclass_factory(self.interface)
proj_obj = project_factory()
self.namespace = proj_obj.namespace
if self.interface == constants.CEPHFILESYSTEM:
self.sc = "CephFS"
if self.interface == constants.CEPHBLOCKPOOL:
self.sc = "RBD"
@pytest.mark.parametrize(
argnames=["interface_type", "bulk_size"],
argvalues=[
pytest.param(
*[constants.CEPHBLOCKPOOL, 120],
),
pytest.param(
*[constants.CEPHBLOCKPOOL, 240],
),
pytest.param(
*[constants.CEPHFILESYSTEM, 120],
),
pytest.param(
*[constants.CEPHFILESYSTEM, 240],
),
],
)
@pytest.mark.usefixtures(base_setup.__name__)
@polarion_id("OCS-1620")
def test_bulk_pod_attach_performance(self, teardown_factory, bulk_size):
test_start_time = PASTest.get_time()
log.info(f"Start creating bulk of new {bulk_size} PVCs")
pvc_objs, _ = helpers.create_multiple_pvcs(
sc_name=self.sc_obj.name,
namespace=self.namespace,
number_of_pvc=bulk_size,
size=self.pvc_size,
burst=True,
)
for pvc_obj in pvc_objs:
pvc_obj.reload()
teardown_factory(pvc_obj)
with ThreadPoolExecutor(max_workers=5) as executor:
for pvc_obj in pvc_objs:
executor.submit(
helpers.wait_for_resource_state, pvc_obj, constants.STATUS_BOUND
)
executor.submit(pvc_obj.reload)
start_time = helpers.get_provision_time(
self.interface, pvc_objs, status="start"
)
end_time = helpers.get_provision_time(self.interface, pvc_objs, status="end")
total_time = (end_time - start_time).total_seconds()
log.info(
f"{self.interface}: Bulk of {bulk_size} PVCs creation time is {total_time} seconds."
)
pvc_names_list = []
for pvc_obj in pvc_objs:
pvc_names_list.append(pvc_obj.name)
log.info(f"{self.interface} : Before pod attach")
bulk_start_time = time.time()
pod_data_list = list()
pod_data_list.extend(
scale_lib.attach_multiple_pvc_to_pod_dict(
pvc_list=pvc_names_list,
namespace=self.namespace,
pvcs_per_pod=1,
)
)
lcl = locals()
tmp_path = pathlib.Path(ocsci_log_path())
obj_name = "obj1"
lcl[f"pod_kube_{obj_name}"] = ObjectConfFile(
name=f"pod_kube_{obj_name}",
obj_dict_list=pod_data_list,
project=defaults.ROOK_CLUSTER_NAMESPACE,
tmp_path=tmp_path,
)
lcl[f"pod_kube_{obj_name}"].create(namespace=self.namespace)
log.info("Checking that pods are running")
pod_running_list = scale_lib.check_all_pod_reached_running_state_in_kube_job(
kube_job_obj=lcl[f"pod_kube_{obj_name}"],
namespace=self.namespace,
no_of_pod=len(pod_data_list),
timeout=180,
)
for pod_name in pod_running_list:
pod_obj = get_pod_obj(pod_name, self.namespace)
teardown_factory(pod_obj)
bulk_end_time = time.time()
bulk_total_time = bulk_end_time - bulk_start_time
log.info(
f"Bulk attach time of {len(pod_running_list)} pods is {bulk_total_time} seconds"
)
self.get_env_info()
full_log_path = get_full_test_logs_path(cname=self)
self.results_path = get_full_test_logs_path(cname=self)
full_log_path += f"-{self.sc}"
full_results = self.init_full_results(
ResultsAnalyse(
self.uuid, self.crd_data, full_log_path, "pod_bulk_attachtime"
)
)
full_results.add_key("storageclass", self.sc)
full_results.add_key("pod_bulk_attach_time", bulk_total_time)
full_results.add_key("pvc_size", self.pvc_size)
full_results.add_key("bulk_size", bulk_size)
test_end_time = PASTest.get_time()
full_results.add_key(
"test_time", {"start": test_start_time, "end": test_end_time}
)
if full_results.es_write():
res_link = full_results.results_link()
log.info(f"The result can be found at : {res_link}")
self.write_result_to_file(res_link)
def test_bulk_pod_attach_results(self):
self.number_of_tests = 4
self.results_path = get_full_test_logs_path(
cname=self, fname="test_bulk_pod_attach_performance"
)
self.results_file = os.path.join(self.results_path, "all_results.txt")
log.info(f"Check results in {self.results_file}")
self.check_tests_results()
self.push_to_dashboard(test_name="Bulk Pod Attach Time")
def init_full_results(self, full_results):
for key in self.environment:
full_results.add_key(key, self.environment[key])
full_results.add_key("index", full_results.new_index)
return full_results
| true
| true
|
7906d5af2639f796a512ee42f6fe28520f1c6628
| 732
|
py
|
Python
|
lesson3-functional_programming/timing.py
|
zubrik13/udacity_inter_py
|
4c7aad840048d1287e12515aeaf583ffbfbc9f56
|
[
"MIT"
] | null | null | null |
lesson3-functional_programming/timing.py
|
zubrik13/udacity_inter_py
|
4c7aad840048d1287e12515aeaf583ffbfbc9f56
|
[
"MIT"
] | null | null | null |
lesson3-functional_programming/timing.py
|
zubrik13/udacity_inter_py
|
4c7aad840048d1287e12515aeaf583ffbfbc9f56
|
[
"MIT"
] | null | null | null |
# Timing functionality from Python's built-in module
from time import perf_counter
from functools import lru_cache
def timer(fn):
def inner(*args):
start = perf_counter()
result = fn(*args)
end = perf_counter()
elapsed = end - start
print(result)
print('elapsed', elapsed)
return inner
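# Note: `inner` prints fn's return value instead of returning it, so a function
# decorated with @timer (e.g. calc_factorial below) itself returns None.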
@timer
def calc_factorial(num):
if num < 0:
raise ValueError('Please use a number not smaller than 0')
product = 1
for i in range(num):
product = product * (i+1)
return product
# @timer
# @lru_cache()
# def fib(n):
# if n < 2:
# return n
# return fib(n-1) + fib(n-2)
if __name__ == '__main__':
calc_factorial(88)
# fib(25)
| 17.853659
| 66
| 0.592896
|
from time import perf_counter
from functools import lru_cache
def timer(fn):
def inner(*args):
start = perf_counter()
result = fn(*args)
end = perf_counter()
elapsed = end - start
print(result)
print('elapsed', elapsed)
return inner
@timer
def calc_factorial(num):
if num < 0:
raise ValueError('Please use a number not smaller than 0')
product = 1
for i in range(num):
product = product * (i+1)
return product
# @timer
# @lru_cache()
# def fib(n):
# if n < 2:
# return n
# return fib(n-1) + fib(n-2)
if __name__ == '__main__':
calc_factorial(88)
# fib(25)
| true
| true
|
7906d5cf26427331db5adbd74d864ce063b47529
| 520
|
py
|
Python
|
review_heatmap/gui/forms/anki21/__init__.py
|
kb1900/Anki-Addons
|
3b764af8657065c369d404025a3f11c964192a33
|
[
"MIT"
] | 1
|
2019-06-23T04:46:24.000Z
|
2019-06-23T04:46:24.000Z
|
review_heatmap/gui/forms/anki21/__init__.py
|
kb1900/Anki-Addons
|
3b764af8657065c369d404025a3f11c964192a33
|
[
"MIT"
] | null | null | null |
review_heatmap/gui/forms/anki21/__init__.py
|
kb1900/Anki-Addons
|
3b764af8657065c369d404025a3f11c964192a33
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Review Heatmap Add-on for Anki
# Copyright (C) 2016-2019 Glutanimate <https://glutanimate.com>
#
# This file was automatically generated by Anki Add-on Builder v0.1.4
# It is subject to the same licensing terms as the rest of the program
# (see the LICENSE file which accompanies this program).
#
# WARNING! All changes made in this file will be lost!
"""
Initializes generated Qt forms/resources
"""
__all__ = [
"options",
"contrib"
]
from . import options
from . import contrib
| 22.608696
| 70
| 0.709615
|
__all__ = [
"options",
"contrib"
]
from . import options
from . import contrib
| true
| true
|
7906d5ddfaa817ec33d0183b890d6b6ae72fd4eb
| 7,525
|
py
|
Python
|
example/gdb-loc/gdbloc/parser.py
|
rocky/python-spark
|
d3f966a4e8c191c51b1dcfa444026b4c6831984f
|
[
"MIT"
] | 43
|
2016-04-24T15:20:16.000Z
|
2022-03-19T21:01:29.000Z
|
example/gdb-loc/gdbloc/parser.py
|
rocky/python-spark
|
d3f966a4e8c191c51b1dcfa444026b4c6831984f
|
[
"MIT"
] | 11
|
2016-06-01T16:06:38.000Z
|
2020-05-20T20:15:32.000Z
|
example/gdb-loc/gdbloc/parser.py
|
rocky/python-spark
|
d3f966a4e8c191c51b1dcfa444026b4c6831984f
|
[
"MIT"
] | 12
|
2016-05-24T12:15:04.000Z
|
2021-11-20T02:14:00.000Z
|
# Copyright (c) 2017 Rocky Bernstein
"""
Parsing for a trepan2/trepan3k debugger
"breakpoint', "list", or "disasm" command arguments
This is a debugger location along with:
- an optional condition parsing for breakpoints commands
- a range or count for "list" commands
"""
from __future__ import print_function
import sys
from spark_parser.ast import AST
from gdbloc.scanner import LocationScanner, ScannerError
from spark_parser import GenericASTBuilder, DEFAULT_DEBUG
class LocationError(Exception):
def __init__(self, text, text_cursor):
self.text = text
self.text_cursor = text_cursor
def __str__(self):
return self.text + "\n" + self.text_cursor
class LocationParser(GenericASTBuilder):
"""Location parsing as used in trepan2 and trepan3k
for list, breakpoint, and assembly commands
Note: function parse() comes from GenericASTBuilder
"""
def __init__(self, start_nt, text, debug=DEFAULT_DEBUG):
super(LocationParser, self).__init__(AST, start_nt, debug=debug)
self.debug = debug
self.text = text
def error(self, tokens, index):
token = tokens[index]
if self.debug.get('local_print', False):
print(self.text)
print(' ' * (token.offset + len(str(token.value))) + '^')
print("Syntax error at or near token '%s'" % token.value)
if 'context' in self.debug and self.debug['context']:
super(LocationParser, self).error(tokens, index)
raise LocationError(self.text,
' ' * (token.offset + len(str(token.value))) + '^')
def nonterminal(self, nt, args):
has_len = hasattr(args, '__len__')
collect = ('tokens',)
if nt in collect:
#
# Collect iterated thingies together.
#
rv = args[0]
for arg in args[1:]:
rv.append(arg)
if (has_len and len(args) == 1 and
hasattr(args[0], '__len__') and len(args[0]) == 1):
# Remove singleton derivations
rv = GenericASTBuilder.nonterminal(self, nt, args[0])
del args[0] # save memory
else:
rv = GenericASTBuilder.nonterminal(self, nt, args)
return rv
##########################################################
# Expression grammar rules. Grammar rule functions
# start with the name p_ and are collected automatically
##########################################################
def p_bp_location(self, args):
'''
bp_start ::= opt_space location_if opt_space
'''
# "disasm" command range which might refer to locations, ranges, and addresses
def p_asm_range(self, args):
'''
arange_start ::= opt_space arange
arange ::= range
arange ::= addr_location opt_space COMMA opt_space NUMBER
arange ::= addr_location opt_space COMMA opt_space OFFSET
arange ::= addr_location opt_space COMMA opt_space ADDRESS
arange ::= location opt_space COMMA opt_space ADDRESS
arange ::= addr_location opt_space COMMA
arange ::= addr_location
# Unlike ranges, We don't allow ending at an address
# arange ::= COMMA opt_space addr_location
addr_location ::= location
addr_location ::= ADDRESS
'''
# "list" command range which may refer to locations
def p_list_range(self, args):
'''
range_start ::= opt_space range
range ::= location opt_space COMMA opt_space NUMBER
range ::= location opt_space COMMA opt_space OFFSET
range ::= COMMA opt_space location
range ::= location opt_space COMMA
range ::= location
range ::= DIRECTION
'''
# location that is used in breakpoints, list commands, and disassembly
def p_location(self, args):
'''
opt_space ::= SPACE?
location_if ::= location
location_if ::= location SPACE IF tokens
# Note no space is allowed between FILENAME and NUMBER
location ::= FILENAME COLON NUMBER
location ::= FUNCNAME
        # If just a number is given, the filename is implied
location ::= NUMBER
location ::= METHOD
location ::= OFFSET
        # For tokens we accept anything. We're really just
        # going to use the underlying string from the part
        # after "if". So below we list all of the possible tokens.
tokens ::= token+
token ::= COLON
token ::= COMMA
token ::= DIRECTION
token ::= FILENAME
token ::= FUNCNAME
token ::= NUMBER
token ::= OFFSET
token ::= SPACE
'''
def parse_location(start_symbol, text, out=sys.stdout,
show_tokens=False, parser_debug=DEFAULT_DEBUG):
assert isinstance(text, str)
tokens = LocationScanner().tokenize(text)
if show_tokens:
for t in tokens:
print(t)
# For heavy grammar debugging
# parser_debug = {'rules': True, 'transition': True, 'reduce': True,
# 'errorstack': True, 'dups': True}
# parser_debug = {'rules': False, 'transition': False, 'reduce': True,
# 'errorstack': 'full', 'dups': False}
parser = LocationParser(start_symbol, text, parser_debug)
parser.check_grammar(frozenset(('bp_start', 'range_start', 'arange_start')))
return parser.parse(tokens)
def parse_bp_location(*args, **kwargs):
return parse_location('bp_start', *args, **kwargs)
def parse_range(*args, **kwargs):
return parse_location('range_start', *args, **kwargs)
def parse_arange(*args, **kwargs):
return parse_location('arange_start', *args, **kwargs)
if __name__ == '__main__':
def doit(fn, line):
try:
ast = fn(line, show_tokens=True)
print(ast)
except ScannerError as e:
print("Scanner error")
print(e.text)
print(e.text_cursor)
except LocationError as e:
print("Parser error at or near")
print(e.text)
print(e.text_cursor)
# FIXME: we should make sure all of the below is in a unit test.
lines = """
/tmp/foo.py:12
12
../foo.py:5
gcd()
foo.py:5 if x > 1
""".splitlines()
for line in lines:
if not line.strip():
continue
print("=" * 30)
print(line)
print("+" * 30)
doit(parse_bp_location, line)
# bad_lines = """
# /tmp/foo.py
# '''/tmp/foo.py'''
# /tmp/foo.py 12
# gcd()
# foo.py if x > 1
# """.splitlines()
# for line in bad_lines:
# if not line.strip():
# continue
# print("=" * 30)
# print(line)
# print("+" * 30)
# doit(parse_bp_location, line)
# lines = """
# 1
# 2,
# ,3
# 4,10
# """.splitlines()
# for line in lines:
# if not line.strip():
# continue
# print("=" * 30)
# print(line)
# print("+" * 30)
# doit(parse_range, line)
# print(ast)
lines = (
"*0",
"*1 ,",
"2 , *10",
"2, 10",
"*3, 10",
"sys.exit() , *20"
)
for line in lines:
line = line.strip()
if not line:
continue
print("=" * 30)
print(line)
print("+" * 30)
doit(parse_arange, line)
| 29.98008
| 82
| 0.562791
|
from __future__ import print_function
import sys
from spark_parser.ast import AST
from gdbloc.scanner import LocationScanner, ScannerError
from spark_parser import GenericASTBuilder, DEFAULT_DEBUG
class LocationError(Exception):
def __init__(self, text, text_cursor):
self.text = text
self.text_cursor = text_cursor
def __str__(self):
return self.text + "\n" + self.text_cursor
class LocationParser(GenericASTBuilder):
def __init__(self, start_nt, text, debug=DEFAULT_DEBUG):
super(LocationParser, self).__init__(AST, start_nt, debug=debug)
self.debug = debug
self.text = text
def error(self, tokens, index):
token = tokens[index]
if self.debug.get('local_print', False):
print(self.text)
print(' ' * (token.offset + len(str(token.value))) + '^')
print("Syntax error at or near token '%s'" % token.value)
if 'context' in self.debug and self.debug['context']:
super(LocationParser, self).error(tokens, index)
raise LocationError(self.text,
' ' * (token.offset + len(str(token.value))) + '^')
def nonterminal(self, nt, args):
has_len = hasattr(args, '__len__')
collect = ('tokens',)
if nt in collect:
rv = args[0]
for arg in args[1:]:
rv.append(arg)
if (has_len and len(args) == 1 and
hasattr(args[0], '__len__') and len(args[0]) == 1):
rv = GenericASTBuilder.nonterminal(self, nt, args[0])
del args[0]
else:
rv = GenericASTBuilder.nonterminal(self, nt, args)
return rv
| true
| true
|
7906d608e7fc287720cb89ba3cf03f982d2deb89
| 5,420
|
py
|
Python
|
rlpy/stats/models/_basic.py
|
evenmarbles/rlpy
|
3c3c39a316285ca725268e81aef030e5c764f797
|
[
"0BSD"
] | 10
|
2015-11-12T18:48:53.000Z
|
2021-06-22T05:54:11.000Z
|
rlpy/stats/models/_basic.py
|
evenmarbles/rlpy
|
3c3c39a316285ca725268e81aef030e5c764f797
|
[
"0BSD"
] | 2
|
2018-06-16T02:37:31.000Z
|
2018-11-05T16:42:24.000Z
|
rlpy/stats/models/_basic.py
|
evenmarbles/rlpy
|
3c3c39a316285ca725268e81aef030e5c764f797
|
[
"0BSD"
] | 6
|
2015-11-30T10:32:08.000Z
|
2020-08-24T01:32:35.000Z
|
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
import numpy as np
from scipy.misc import doccer
from ...stats import nonuniform
from ...auxiliary.array import normalize, nunique, accum
__all__ = ['markov']
_doc_default_callparams = """\
startprob : array_like
Start probabilities.
transmat : array_like
Transition matrix.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
}
# noinspection PyPep8Naming
class markov_gen(object):
"""Markov model.
The `startprob` keyword specifies the start probabilities for the model.
The `transmat` keyword specifies the transition probabilities the model
follows.
Methods
-------
score(x, startprob, transmat)
Log probability of the given data `x`.
sample(x, startprob, transmat, size=1)
Draw random samples from a Markov model.
fit(x)
Fits a Markov model from data via MLE or MAP.
Parameters
----------
%(_doc_default_callparams)s
    Alternatively, the object may be called (as a function) to fix the start
    probabilities and transition matrix, returning a "frozen" Markov model:
    rv = markov(startprob=None, transmat=None)
        - Frozen object with the same methods but holding the given
          start probabilities and transitions fixed.
Examples
--------
>>> from mlpy.stats.models import markov
>>> startprob = np.array([0.1, 0.4, 0.5])
>>> transmat = np.array([[0.3, 0.2, 0.5], [0.6, 0.3, 0.1], [0.1, 0.5, 0.4]])
>>> m = markov(startprob, transmat)
>>> m.sample(size=2)
[[2 2]]
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
def __init__(self):
super(markov_gen, self).__init__()
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, startprob, transmat):
        return markov_frozen(startprob, transmat)
def score(self, x, startprob, transmat):
"""Log probability for a given data `x`.
Attributes
----------
x : ndarray
Data to evaluate.
%(_doc_default_callparams)s
Returns
-------
log_prob : float
The log probability of the data.
"""
log_transmat = np.log(transmat + np.finfo(float).eps)
log_startprob = np.log(startprob + np.finfo(float).eps)
log_prior = log_startprob[x[:, 0]]
n = x.shape[0]
nstates = log_startprob.shape[0]
logp = np.zeros(n)
for i in range(n):
njk = accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates), dtype=np.int32)
logp[i] = np.sum(njk * log_transmat)
return logp + log_prior
def sample(self, startprob, transmat, size=1):
"""Sample from a Markov model.
Attributes
----------
size: int
            Number of sampled variates. Defaults to `1`.
Returns
-------
vals: ndarray
The sampled sequences of size (nseq, seqlen).
"""
if np.isscalar(size):
size = (1, size)
vals = np.zeros(size, dtype=np.int32)
nseq, seqlen = size
for i in range(nseq):
vals[i][0] = nonuniform.rvs(startprob)
for t in range(1, seqlen):
vals[i][t] = nonuniform.rvs(transmat[vals[i][t - 1]])
return vals
def fit(self, x):
"""Fit a Markov model from data via MLE or MAP.
Attributes
----------
x : ndarray[int]
Observed data
Returns
-------
%(_doc_default_callparams)s
"""
# TODO: allow to pass pseudo_counts as parameter?
nstates = nunique(x.ravel())
pi_pseudo_counts = np.ones(nstates)
transmat_pseudo_counts = np.ones((nstates, nstates))
n = x.shape[0]
startprob = normalize(np.bincount(x[:, 0])) + pi_pseudo_counts - 1
counts = np.zeros((nstates, nstates))
for i in range(n):
counts += accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates))
transmat = normalize(counts + transmat_pseudo_counts - 1, 1)
return startprob, transmat
markov = markov_gen()
# noinspection PyPep8Naming
class markov_frozen(object):
def __init__(self, startprob, transmat):
"""Create a "frozen" Markov model.
Parameters
----------
startprob : array_like
Start probabilities
transmat : array_like
Transition matrix
"""
self._model = markov_gen()
self.startprob = startprob
self.transmat = transmat
def score(self, x):
return self._model.score(x, self.startprob, self.transmat)
def sample(self, size=1):
return self._model.sample(self.startprob, self.transmat, size)
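# A short usage sketch (an addition, not part of the original module) of the
# markov model defined above: fit start/transition probabilities from integer
# state sequences, then sample and score. The toy sequences below are made up.
if __name__ == '__main__':
    demo_seqs = np.array([[0, 1, 2, 1], [2, 0, 1, 2]])
    demo_startprob, demo_transmat = markov.fit(demo_seqs)
    demo_samples = markov.sample(demo_startprob, demo_transmat, size=(2, 4))
    print(markov.score(demo_samples, demo_startprob, demo_transmat))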
| 27.236181
| 117
| 0.606827
|
from __future__ import division, print_function, absolute_import
from six.moves import range
import numpy as np
from scipy.misc import doccer
from ...stats import nonuniform
from ...auxiliary.array import normalize, nunique, accum
__all__ = ['markov']
_doc_default_callparams = """\
startprob : array_like
Start probabilities.
transmat : array_like
Transition matrix.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
}
class markov_gen(object):
def __init__(self):
super(markov_gen, self).__init__()
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, startprob, transmat):
        return markov_frozen(startprob, transmat)
def score(self, x, startprob, transmat):
log_transmat = np.log(transmat + np.finfo(float).eps)
log_startprob = np.log(startprob + np.finfo(float).eps)
log_prior = log_startprob[x[:, 0]]
n = x.shape[0]
nstates = log_startprob.shape[0]
logp = np.zeros(n)
for i in range(n):
njk = accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates), dtype=np.int32)
logp[i] = np.sum(njk * log_transmat)
return logp + log_prior
def sample(self, startprob, transmat, size=1):
if np.isscalar(size):
size = (1, size)
vals = np.zeros(size, dtype=np.int32)
nseq, seqlen = size
for i in range(nseq):
vals[i][0] = nonuniform.rvs(startprob)
for t in range(1, seqlen):
vals[i][t] = nonuniform.rvs(transmat[vals[i][t - 1]])
return vals
def fit(self, x):
nstates = nunique(x.ravel())
pi_pseudo_counts = np.ones(nstates)
transmat_pseudo_counts = np.ones((nstates, nstates))
n = x.shape[0]
startprob = normalize(np.bincount(x[:, 0])) + pi_pseudo_counts - 1
counts = np.zeros((nstates, nstates))
for i in range(n):
counts += accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates))
transmat = normalize(counts + transmat_pseudo_counts - 1, 1)
return startprob, transmat
markov = markov_gen()
class markov_frozen(object):
def __init__(self, startprob, transmat):
self._model = markov_gen()
self.startprob = startprob
self.transmat = transmat
def score(self, x):
return self._model.score(x, self.startprob, self.transmat)
def sample(self, size=1):
return self._model.sample(self.startprob, self.transmat, size)
| true
| true
|
7906d61801dffd70cb124dfeedad333699e40a3d
| 3,868
|
py
|
Python
|
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/_configuration.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/_configuration.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/_configuration.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SiteRecoveryManagementClientConfiguration(Configuration):
"""Configuration for SiteRecoveryManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription Id.
:type subscription_id: str
:param resource_group_name: The name of the resource group where the recovery services vault is present.
:type resource_group_name: str
:param resource_name: The name of the recovery services vault.
:type resource_name: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if resource_group_name is None:
raise ValueError("Parameter 'resource_group_name' must not be None.")
if resource_name is None:
raise ValueError("Parameter 'resource_name' must not be None.")
super(SiteRecoveryManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.resource_group_name = resource_group_name
self.resource_name = resource_name
self.api_version = "2021-06-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-recoveryservicessiterecovery/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
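# A hedged usage sketch (not part of the generated code): constructing the
# configuration directly. It assumes the azure-identity package provides the
# credential; the subscription ID and names below are placeholders.
if __name__ == "__main__":
    from azure.identity.aio import DefaultAzureCredential
    config = SiteRecoveryManagementClientConfiguration(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group_name="example-resource-group",
        resource_name="example-recovery-vault",
    )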
| 48.35
| 134
| 0.70243
|
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
class SiteRecoveryManagementClientConfiguration(Configuration):
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if resource_group_name is None:
raise ValueError("Parameter 'resource_group_name' must not be None.")
if resource_name is None:
raise ValueError("Parameter 'resource_name' must not be None.")
super(SiteRecoveryManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.resource_group_name = resource_group_name
self.resource_name = resource_name
self.api_version = "2021-06-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-recoveryservicessiterecovery/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| true
| true
|
7906d8ade39ac3b130396371247f68fd6a0774ea
| 1,058
|
py
|
Python
|
security/onap_security/test_security_test.py
|
onap/integration-xtesting
|
a2b118029680f62e053211a9fd9443308286a31c
|
[
"Apache-2.0"
] | 1
|
2021-10-15T15:18:53.000Z
|
2021-10-15T15:18:53.000Z
|
security/onap_security/test_security_test.py
|
onap/integration-xtesting
|
a2b118029680f62e053211a9fd9443308286a31c
|
[
"Apache-2.0"
] | null | null | null |
security/onap_security/test_security_test.py
|
onap/integration-xtesting
|
a2b118029680f62e053211a9fd9443308286a31c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2018 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""Define the classes required to fully cover k8s."""
import logging
import os
import unittest
from security_tests import SecurityTesting
class SecurityTests(unittest.TestCase):
# pylint: disable=missing-docstring
def setUp(self):
os.environ["DEPLOY_SCENARIO"] = "k8-test"
os.environ["KUBE_MASTER_IP"] = "127.0.0.1"
os.environ["KUBE_MASTER_URL"] = "https://127.0.0.1:6443"
os.environ["KUBERNETES_PROVIDER"] = "local"
self.security_stesting = SecurityTesting.SecurityTesting()
def test_run_kubetest_cmd_none(self):
with self.assertRaises(TypeError):
self.security_stesting.run_security()
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
| 25.804878
| 71
| 0.713611
|
import logging
import os
import unittest
from security_tests import SecurityTesting
class SecurityTests(unittest.TestCase):
def setUp(self):
os.environ["DEPLOY_SCENARIO"] = "k8-test"
os.environ["KUBE_MASTER_IP"] = "127.0.0.1"
os.environ["KUBE_MASTER_URL"] = "https://127.0.0.1:6443"
os.environ["KUBERNETES_PROVIDER"] = "local"
self.security_stesting = SecurityTesting.SecurityTesting()
def test_run_kubetest_cmd_none(self):
with self.assertRaises(TypeError):
self.security_stesting.run_security()
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
| true
| true
|
7906dab72d9236f735c802775a9ef6520b9a646e
| 680
|
py
|
Python
|
scripts/report_gen.py
|
aw32/sched
|
b6ef35c5b517875a5954c70e2dc366fab3721a60
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/report_gen.py
|
aw32/sched
|
b6ef35c5b517875a5954c70e2dc366fab3721a60
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/report_gen.py
|
aw32/sched
|
b6ef35c5b517875a5954c70e2dc366fab3721a60
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019, Alex Wiens <awiens@mail.upb.de>, Achim Lösch <achim.loesch@upb.de>
# SPDX-License-Identifier: BSD-2-Clause
import os
import os.path
import subprocess
import test as schedtest
import plot
def hostname():
return subprocess.getoutput("hostname")
if __name__ == "__main__":
cwd = os.getcwd()
testname = os.path.basename(cwd)
	host = os.environ["SCHED_HOST"] if "SCHED_HOST" in os.environ else hostname()
for testtype in ["sim","exp"]:
test = schedtest.SchedTest.loadTest(testtype, testname=testname, resultdir=cwd, host=host)
if test != None and test.loadTestLog():
test.generate_report()
else:
print("log for",testtype,"not found")
| 23.448276
| 92
| 0.726471
|
import os
import os.path
import subprocess
import test as schedtest
import plot
def hostname():
return subprocess.getoutput("hostname")
if __name__ == "__main__":
cwd = os.getcwd()
testname = os.path.basename(cwd)
	host = os.environ["SCHED_HOST"] if "SCHED_HOST" in os.environ else hostname()
for testtype in ["sim","exp"]:
test = schedtest.SchedTest.loadTest(testtype, testname=testname, resultdir=cwd, host=host)
if test != None and test.loadTestLog():
test.generate_report()
else:
print("log for",testtype,"not found")
| true
| true
|
7906dac074880ad5d64e71289a4fb936885ee4f3
| 493
|
py
|
Python
|
Python3/Python3_Lesson09/src/reprmagic.py
|
ceeblet/OST_PythonCertificationTrack
|
042e0ce964bc88b3f4132dcbd7e06c5f504eae34
|
[
"MIT"
] | null | null | null |
Python3/Python3_Lesson09/src/reprmagic.py
|
ceeblet/OST_PythonCertificationTrack
|
042e0ce964bc88b3f4132dcbd7e06c5f504eae34
|
[
"MIT"
] | null | null | null |
Python3/Python3_Lesson09/src/reprmagic.py
|
ceeblet/OST_PythonCertificationTrack
|
042e0ce964bc88b3f4132dcbd7e06c5f504eae34
|
[
"MIT"
] | null | null | null |
"""
Demonstrate differences between __str__() and __repr__().
"""
class neither:
pass
class stronly:
def __str__(self):
return "STR"
class repronly:
def __repr__(self):
return "REPR"
class both(stronly, repronly):
pass
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return self.name
def __repr__(self):
return "Person({0.name!r}, {0.age!r})".format(self)
| 18.961538
| 59
| 0.600406
|
class neither:
pass
class stronly:
def __str__(self):
return "STR"
class repronly:
def __repr__(self):
return "REPR"
class both(stronly, repronly):
pass
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return self.name
def __repr__(self):
return "Person({0.name!r}, {0.age!r})".format(self)
| true
| true
|
7906dbd0fffac6fa2453ec6b028b0b9623ac5c12
| 2,975
|
py
|
Python
|
bip/base/bipidb.py
|
paulfariello-syn/bip
|
901adc4ee368cd02666410099e9382b068f7ae68
|
[
"BSD-3-Clause"
] | 145
|
2020-08-13T16:54:33.000Z
|
2022-03-06T09:20:54.000Z
|
bip/base/bipidb.py
|
paulfariello-syn/bip
|
901adc4ee368cd02666410099e9382b068f7ae68
|
[
"BSD-3-Clause"
] | 10
|
2020-08-14T18:00:47.000Z
|
2022-03-25T00:34:16.000Z
|
bip/base/bipidb.py
|
paulfariello-syn/bip
|
901adc4ee368cd02666410099e9382b068f7ae68
|
[
"BSD-3-Clause"
] | 20
|
2020-08-14T17:56:00.000Z
|
2022-03-28T16:16:03.000Z
|
# define BipIdb and some helper functions for easier scripting (at the end).
import ida_kernwin
import idaapi
import idc
class BipIdb(object):
"""
    Class representing the IDB loaded by IDA; its goal is to provide access
    to things specific to the IDB.
    Currently it contains only static methods.
"""
@staticmethod
def ptr_size():
"""
Return the number of bits in a pointer.
:rtype: int
"""
info = idaapi.get_inf_structure()
if info.is_64bit():
bits = 64
elif info.is_32bit():
bits = 32
else:
bits = 16
return bits
@staticmethod
def min_ea():
"""
Return the lowest mapped address of the IDB.
"""
return idc.get_inf_attr(idc.INF_MIN_EA)
@staticmethod
def max_ea():
"""
Return the highest mapped address of the IDB.
"""
return idc.get_inf_attr(idc.INF_MAX_EA)
@staticmethod
def image_base():
"""
Return the base address of the image loaded in the IDB.
This is different from :meth:`~BipIdb.min_ea` which is the lowest
*mapped* address.
"""
return idaapi.get_imagebase()
@staticmethod
def current_addr():
"""
Return current screen address.
:return: The current address selected.
"""
return ida_kernwin.get_screen_ea()
@staticmethod
def relea(addr):
"""
        Calculate the relative address compared to the IDA image base.
        The calculation done is ``ADDR - IMGBASE``.
The opposite of this function is :func:`absea`.
:param int addr: The absolute address to translate.
:return: The offset from image base corresponding to ``addr``.
:rtype: int
"""
return addr-idaapi.get_imagebase()
@staticmethod
def absea(offset):
"""
Calculate the absolute address from an offset of the image base.
        The calculation done is ``OFFSET + IMGBASE``.
The opposite of this function is :func:`relea`.
:param int offset: The offset from the beginning of the image base
to translate.
:return: The absolute address corresponding to the offset.
:rtype: int
"""
return offset+idaapi.get_imagebase()
def min_ea():
"""
Return the lowest mapped address of the IDB.
Wrapper on :meth:`BipIdb.min_ea`.
"""
return BipIdb.min_ea()
def max_ea():
"""
Return the highest mapped address of the IDB.
Wrapper on :meth:`BipIdb.max_ea`.
"""
return BipIdb.max_ea()
def Here():
"""
Return current screen address.
:return: The current address.
"""
return BipIdb.current_addr()
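# Hedged illustration (an addition to the module): relea and absea are inverses
# around the image base, so translating an address to an offset and back yields
# the original address. This only runs inside IDA because it relies on idaapi.
def _relea_absea_roundtrip(addr):
    offset = BipIdb.relea(addr)
    assert BipIdb.absea(offset) == addr
    return offset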
| 24.791667
| 78
| 0.558319
|
import ida_kernwin
import idaapi
import idc
class BipIdb(object):
@staticmethod
def ptr_size():
info = idaapi.get_inf_structure()
if info.is_64bit():
bits = 64
elif info.is_32bit():
bits = 32
else:
bits = 16
return bits
@staticmethod
def min_ea():
return idc.get_inf_attr(idc.INF_MIN_EA)
@staticmethod
def max_ea():
return idc.get_inf_attr(idc.INF_MAX_EA)
@staticmethod
def image_base():
return idaapi.get_imagebase()
@staticmethod
def current_addr():
return ida_kernwin.get_screen_ea()
@staticmethod
def relea(addr):
return addr-idaapi.get_imagebase()
@staticmethod
def absea(offset):
return offset+idaapi.get_imagebase()
def min_ea():
return BipIdb.min_ea()
def max_ea():
return BipIdb.max_ea()
def Here():
return BipIdb.current_addr()
| true
| true
|
7906dc20bfa48b9577f568714b2b045210e106f4
| 660
|
py
|
Python
|
examples/pygazebo_sample/ray_sensor.py
|
masayoshi-nakamura/CognitiveArchitectureLecture
|
5e036b48e92f266062eb7be8a366e754dee24f2c
|
[
"Apache-2.0"
] | 4
|
2016-03-13T03:01:28.000Z
|
2016-03-31T02:51:56.000Z
|
examples/pygazebo_sample/ray_sensor.py
|
masayoshi-nakamura/CognitiveArchitectureLecture
|
5e036b48e92f266062eb7be8a366e754dee24f2c
|
[
"Apache-2.0"
] | null | null | null |
examples/pygazebo_sample/ray_sensor.py
|
masayoshi-nakamura/CognitiveArchitectureLecture
|
5e036b48e92f266062eb7be8a366e754dee24f2c
|
[
"Apache-2.0"
] | null | null | null |
import trollius
from trollius import From
from pprint import pprint
import pygazebo.msg.raysensor_pb2
@trollius.coroutine
def publish_loop():
manager = yield From(pygazebo.connect())
def callback(data):
ray = pygazebo.msg.raysensor_pb2.RaySensor()
msg = ray.FromString(data)
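        # msg now holds the parsed gazebo.msgs.RaySensor message; this sample
        # only deserializes it and discards the result.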
subscriber = manager.subscribe(
'/gazebo/default/turtlebot/rack/laser/scan',
'gazebo.msgs.RaySensor',
callback)
yield From(subscriber.wait_for_connection())
while True:
yield From(trollius.sleep(1.00))
if __name__ == "__main__":
loop = trollius.get_event_loop()
loop.run_until_complete(publish_loop())
| 23.571429
| 52
| 0.69697
|
import trollius
from trollius import From
from pprint import pprint
import pygazebo.msg.raysensor_pb2
@trollius.coroutine
def publish_loop():
manager = yield From(pygazebo.connect())
def callback(data):
ray = pygazebo.msg.raysensor_pb2.RaySensor()
msg = ray.FromString(data)
subscriber = manager.subscribe(
'/gazebo/default/turtlebot/rack/laser/scan',
'gazebo.msgs.RaySensor',
callback)
yield From(subscriber.wait_for_connection())
while True:
yield From(trollius.sleep(1.00))
if __name__ == "__main__":
loop = trollius.get_event_loop()
loop.run_until_complete(publish_loop())
| true
| true
|
7906dcba32b697bcec352e5844015ac3dc78b645
| 8,992
|
py
|
Python
|
experiments/avi/eric_grasp_sac_pixel.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/avi/eric_grasp_sac_pixel.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/avi/eric_grasp_sac_pixel.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
import copy
import gym
import numpy as np
import torch.nn as nn
import railrl.misc.hyperparameter as hyp
import railrl.torch.pytorch_util as ptu
from railrl.data_management.obs_dict_replay_buffer import \
ObsDictReplayBuffer
from railrl.launchers.launcher_util import run_experiment
# from railrl.samplers.data_collector import MdpPathCollector
# from railrl.samplers.data_collector.step_collector import MdpStepCollector
from railrl.samplers.data_collector.path_collector import ObsDictPathCollector
from railrl.samplers.data_collector.step_collector import ObsDictStepCollector
from railrl.visualization.video import VideoSaveFunctionBullet
from railrl.misc.buffer_save import BufferSaveFunction
from railrl.torch.networks import (
CNN,
MlpQfWithObsProcessor,
Split,
FlattenEach,
Concat,
Flatten,
)
from railrl.torch.sac.policies import (
MakeDeterministic, TanhGaussianPolicyAdapter,
)
from railrl.torch.sac.sac import SACTrainer
from railrl.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
TorchOnlineRLAlgorithm,
)
import os.path as osp
from experiments.avi.env_wrappers import FlatEnv
PARENT_DIR = '/media/avi/data/Work/github/'
import sys
env_file = osp.join(PARENT_DIR, 'avisingh599/google-research/dql_grasping/')
sys.path.insert(1, env_file)
from grasping_env import KukaGraspingProceduralEnv
def experiment(variant):
env_params = dict(
block_random=0.3,
camera_random=0,
simple_observations=False,
continuous=True,
remove_height_hack=True,
render_mode="DIRECT",
# render_mode="GUI",
num_objects=5,
max_num_training_models=900,
target=False,
test=False,
)
expl_env = FlatEnv(KukaGraspingProceduralEnv(**env_params))
eval_env = expl_env
img_width, img_height = eval_env.image_shape
num_channels = 3
action_dim = int(np.prod(eval_env.action_space.shape))
cnn_params = variant['cnn_params']
cnn_params.update(
input_width=img_width,
input_height=img_height,
input_channels=num_channels,
added_fc_input_size=0,
output_conv_channels=True,
output_size=None,
)
qf_cnn = CNN(**cnn_params)
qf_obs_processor = nn.Sequential(
qf_cnn,
Flatten(),
)
qf_kwargs = copy.deepcopy(variant['qf_kwargs'])
qf_kwargs['obs_processor'] = qf_obs_processor
qf_kwargs['output_size'] = 1
qf_kwargs['input_size'] = (
action_dim + qf_cnn.conv_output_flat_size
)
qf1 = MlpQfWithObsProcessor(**qf_kwargs)
qf2 = MlpQfWithObsProcessor(**qf_kwargs)
target_qf_cnn = CNN(**cnn_params)
target_qf_obs_processor = nn.Sequential(
target_qf_cnn,
Flatten(),
)
target_qf_kwargs = copy.deepcopy(variant['qf_kwargs'])
target_qf_kwargs['obs_processor'] = target_qf_obs_processor
target_qf_kwargs['output_size'] = 1
target_qf_kwargs['input_size'] = (
action_dim + target_qf_cnn.conv_output_flat_size
)
target_qf1 = MlpQfWithObsProcessor(**target_qf_kwargs)
target_qf2 = MlpQfWithObsProcessor(**target_qf_kwargs)
action_dim = int(np.prod(eval_env.action_space.shape))
policy_cnn = CNN(**cnn_params)
policy_obs_processor = nn.Sequential(
policy_cnn,
Flatten(),
)
policy = TanhGaussianPolicyAdapter(
policy_obs_processor,
policy_cnn.conv_output_flat_size,
action_dim,
**variant['policy_kwargs']
)
observation_key = 'image'
eval_policy = MakeDeterministic(policy)
eval_path_collector = ObsDictPathCollector(
eval_env,
eval_policy,
observation_key=observation_key,
**variant['eval_path_collector_kwargs']
)
replay_buffer = ObsDictReplayBuffer(
variant['replay_buffer_size'],
expl_env,
observation_key=observation_key,
)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['trainer_kwargs']
)
if variant['collection_mode'] == 'batch':
expl_path_collector = ObsDictPathCollector(
expl_env,
policy,
observation_key=observation_key,
**variant['expl_path_collector_kwargs']
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
elif variant['collection_mode'] == 'online':
expl_path_collector = ObsDictStepCollector(
expl_env,
policy,
observation_key=observation_key,
**variant['expl_path_collector_kwargs']
)
algorithm = TorchOnlineRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
else:
raise NotImplementedError
video_func = VideoSaveFunctionBullet(variant)
algorithm.post_train_funcs.append(video_func)
# dump_buffer_func = BufferSaveFunction(variant)
# algorithm.post_train_funcs.append(dump_buffer_func)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
variant = dict(
trainer_kwargs=dict(
discount=0.99,
# soft_target_tau=5e-3,
# target_update_period=1,
soft_target_tau=1.0,
target_update_period=1000,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
),
algo_kwargs=dict(
batch_size=256,
max_path_length=15,
num_epochs=5000,
num_eval_steps_per_epoch=45,
num_expl_steps_per_train_loop=300,
num_trains_per_train_loop=300,
min_num_steps_before_training=10*300,
# max_path_length=10,
# num_epochs=100,
# num_eval_steps_per_epoch=100,
# num_expl_steps_per_train_loop=100,
# num_trains_per_train_loop=100,
# min_num_steps_before_training=100,
),
cnn_params=dict(
kernel_sizes=[3, 3],
n_channels=[4, 4],
strides=[1, 1],
hidden_sizes=[32, 32],
paddings=[1, 1],
pool_type='max2d',
pool_sizes=[2, 2],
pool_strides=[2, 2],
pool_paddings=[0, 0],
),
# replay_buffer_size=int(1E6),
qf_kwargs=dict(
hidden_sizes=[256, 256],
),
policy_kwargs=dict(
hidden_sizes=[256, 256],
),
dump_video_kwargs=dict(
imsize=48,
save_video_period=1,
),
logger_config=dict(
snapshot_gap=10,
),
dump_buffer_kwargs=dict(
dump_buffer_period=50,
),
replay_buffer_size=int(5E5),
expl_path_collector_kwargs=dict(),
eval_path_collector_kwargs=dict(),
shared_qf_conv=False,
use_robot_state=False,
randomize_env=True,
)
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument("--env", type=str, required=True,
# choices=('SawyerReach-v0', 'SawyerGraspOne-v0'))
# parser.add_argument("--obs", required=True, type=str, choices=('pixels', 'pixels_debug'))
parser.add_argument("--gpu", type=int, default=1)
args = parser.parse_args()
variant['env'] = 'KukaGraspingProceduralEnv'
variant['obs'] = 'pixels'
n_seeds = 1
mode = 'local'
exp_prefix = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
exp_prefix = 'railrl-bullet-{}-{}'.format(variant['env'], variant['obs'])
# n_seeds = 5
# mode = 'ec2'
# exp_prefix = 'railrl-bullet-sawyer-image-reach'
search_space = {
'shared_qf_conv': [
True,
# False,
],
'collection_mode': [
# 'batch',
'online',
]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
experiment,
exp_name=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
gpu_id=args.gpu,
unpack_variant=False,
)
| 29.973333
| 95
| 0.632006
|
import copy
import gym
import numpy as np
import torch.nn as nn
import railrl.misc.hyperparameter as hyp
import railrl.torch.pytorch_util as ptu
from railrl.data_management.obs_dict_replay_buffer import \
ObsDictReplayBuffer
from railrl.launchers.launcher_util import run_experiment
from railrl.samplers.data_collector.path_collector import ObsDictPathCollector
from railrl.samplers.data_collector.step_collector import ObsDictStepCollector
from railrl.visualization.video import VideoSaveFunctionBullet
from railrl.misc.buffer_save import BufferSaveFunction
from railrl.torch.networks import (
CNN,
MlpQfWithObsProcessor,
Split,
FlattenEach,
Concat,
Flatten,
)
from railrl.torch.sac.policies import (
MakeDeterministic, TanhGaussianPolicyAdapter,
)
from railrl.torch.sac.sac import SACTrainer
from railrl.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
TorchOnlineRLAlgorithm,
)
import os.path as osp
from experiments.avi.env_wrappers import FlatEnv
PARENT_DIR = '/media/avi/data/Work/github/'
import sys
env_file = osp.join(PARENT_DIR, 'avisingh599/google-research/dql_grasping/')
sys.path.insert(1, env_file)
from grasping_env import KukaGraspingProceduralEnv
def experiment(variant):
env_params = dict(
block_random=0.3,
camera_random=0,
simple_observations=False,
continuous=True,
remove_height_hack=True,
render_mode="DIRECT",
num_objects=5,
max_num_training_models=900,
target=False,
test=False,
)
expl_env = FlatEnv(KukaGraspingProceduralEnv(**env_params))
eval_env = expl_env
img_width, img_height = eval_env.image_shape
num_channels = 3
action_dim = int(np.prod(eval_env.action_space.shape))
cnn_params = variant['cnn_params']
cnn_params.update(
input_width=img_width,
input_height=img_height,
input_channels=num_channels,
added_fc_input_size=0,
output_conv_channels=True,
output_size=None,
)
qf_cnn = CNN(**cnn_params)
qf_obs_processor = nn.Sequential(
qf_cnn,
Flatten(),
)
qf_kwargs = copy.deepcopy(variant['qf_kwargs'])
qf_kwargs['obs_processor'] = qf_obs_processor
qf_kwargs['output_size'] = 1
qf_kwargs['input_size'] = (
action_dim + qf_cnn.conv_output_flat_size
)
qf1 = MlpQfWithObsProcessor(**qf_kwargs)
qf2 = MlpQfWithObsProcessor(**qf_kwargs)
target_qf_cnn = CNN(**cnn_params)
target_qf_obs_processor = nn.Sequential(
target_qf_cnn,
Flatten(),
)
target_qf_kwargs = copy.deepcopy(variant['qf_kwargs'])
target_qf_kwargs['obs_processor'] = target_qf_obs_processor
target_qf_kwargs['output_size'] = 1
target_qf_kwargs['input_size'] = (
action_dim + target_qf_cnn.conv_output_flat_size
)
target_qf1 = MlpQfWithObsProcessor(**target_qf_kwargs)
target_qf2 = MlpQfWithObsProcessor(**target_qf_kwargs)
action_dim = int(np.prod(eval_env.action_space.shape))
policy_cnn = CNN(**cnn_params)
policy_obs_processor = nn.Sequential(
policy_cnn,
Flatten(),
)
policy = TanhGaussianPolicyAdapter(
policy_obs_processor,
policy_cnn.conv_output_flat_size,
action_dim,
**variant['policy_kwargs']
)
observation_key = 'image'
eval_policy = MakeDeterministic(policy)
eval_path_collector = ObsDictPathCollector(
eval_env,
eval_policy,
observation_key=observation_key,
**variant['eval_path_collector_kwargs']
)
replay_buffer = ObsDictReplayBuffer(
variant['replay_buffer_size'],
expl_env,
observation_key=observation_key,
)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['trainer_kwargs']
)
if variant['collection_mode'] == 'batch':
expl_path_collector = ObsDictPathCollector(
expl_env,
policy,
observation_key=observation_key,
**variant['expl_path_collector_kwargs']
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
elif variant['collection_mode'] == 'online':
expl_path_collector = ObsDictStepCollector(
expl_env,
policy,
observation_key=observation_key,
**variant['expl_path_collector_kwargs']
)
algorithm = TorchOnlineRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
else:
raise NotImplementedError
video_func = VideoSaveFunctionBullet(variant)
algorithm.post_train_funcs.append(video_func)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
variant = dict(
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=1.0,
target_update_period=1000,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
),
algo_kwargs=dict(
batch_size=256,
max_path_length=15,
num_epochs=5000,
num_eval_steps_per_epoch=45,
num_expl_steps_per_train_loop=300,
num_trains_per_train_loop=300,
min_num_steps_before_training=10*300,
),
cnn_params=dict(
kernel_sizes=[3, 3],
n_channels=[4, 4],
strides=[1, 1],
hidden_sizes=[32, 32],
paddings=[1, 1],
pool_type='max2d',
pool_sizes=[2, 2],
pool_strides=[2, 2],
pool_paddings=[0, 0],
),
qf_kwargs=dict(
hidden_sizes=[256, 256],
),
policy_kwargs=dict(
hidden_sizes=[256, 256],
),
dump_video_kwargs=dict(
imsize=48,
save_video_period=1,
),
logger_config=dict(
snapshot_gap=10,
),
dump_buffer_kwargs=dict(
dump_buffer_period=50,
),
replay_buffer_size=int(5E5),
expl_path_collector_kwargs=dict(),
eval_path_collector_kwargs=dict(),
shared_qf_conv=False,
use_robot_state=False,
randomize_env=True,
)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=1)
args = parser.parse_args()
variant['env'] = 'KukaGraspingProceduralEnv'
variant['obs'] = 'pixels'
n_seeds = 1
mode = 'local'
exp_prefix = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
exp_prefix = 'railrl-bullet-{}-{}'.format(variant['env'], variant['obs'])
search_space = {
'shared_qf_conv': [
True,
],
'collection_mode': [
'online',
]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
experiment,
exp_name=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
gpu_id=args.gpu,
unpack_variant=False,
)
| true
| true
|
7906dcc36c49edaf946121c9e22c6d3b0f5c395e
| 1,792
|
py
|
Python
|
examples/multi_webcamera/host/test_module/__init__.py
|
Curly386/spresense
|
af5691b95640aea7edd04f0d2b733bdec753444b
|
[
"Apache-2.0"
] | 110
|
2018-07-12T16:04:50.000Z
|
2022-02-26T12:27:56.000Z
|
examples/multi_webcamera/host/test_module/__init__.py
|
Curly386/spresense
|
af5691b95640aea7edd04f0d2b733bdec753444b
|
[
"Apache-2.0"
] | 37
|
2018-08-10T13:05:45.000Z
|
2022-03-18T20:33:18.000Z
|
examples/multi_webcamera/host/test_module/__init__.py
|
Curly386/spresense
|
af5691b95640aea7edd04f0d2b733bdec753444b
|
[
"Apache-2.0"
] | 94
|
2018-07-13T03:48:34.000Z
|
2022-03-19T07:32:08.000Z
|
############################################################################
# examples/multi_webcamera/host/test_module/__init__.py
#
# Copyright 2019, 2020 Sony Semiconductor Solutions Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name NuttX nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
from TestServer import TestServer
| 49.777778
| 76
| 0.717634
| true
| true
|
|
7906ddcde6d1620f81e45bb1ed382aaf2324354a
| 557
|
py
|
Python
|
var/spack/repos/builtin/packages/py-fastjsonschema/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/py-fastjsonschema/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/py-fastjsonschema/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyFastjsonschema(PythonPackage):
"""Fast JSON schema validator for Python."""
homepage = "https://github.com/horejsek/python-fastjsonschema"
pypi = "fastjsonschema/fastjsonschema-2.15.1.tar.gz"
version('2.15.1', sha256='671f36d225b3493629b5e789428660109528f373cf4b8a22bac6fa2f8191c2d2')
depends_on('py-setuptools', type='build')
| 34.8125
| 96
| 0.759425
|
class PyFastjsonschema(PythonPackage):
homepage = "https://github.com/horejsek/python-fastjsonschema"
pypi = "fastjsonschema/fastjsonschema-2.15.1.tar.gz"
version('2.15.1', sha256='671f36d225b3493629b5e789428660109528f373cf4b8a22bac6fa2f8191c2d2')
depends_on('py-setuptools', type='build')
| true
| true
|
7906de69213fac71644c64f5bd8688674e6c5710
| 15,151
|
py
|
Python
|
external/jsoncppWrapper/makerelease.py
|
csyzzkdcz/effective-garbanzo
|
87223ecfc26371a9b251a70a0111ca4e0d95b594
|
[
"MIT"
] | null | null | null |
external/jsoncppWrapper/makerelease.py
|
csyzzkdcz/effective-garbanzo
|
87223ecfc26371a9b251a70a0111ca4e0d95b594
|
[
"MIT"
] | null | null | null |
external/jsoncppWrapper/makerelease.py
|
csyzzkdcz/effective-garbanzo
|
87223ecfc26371a9b251a70a0111ca4e0d95b594
|
[
"MIT"
] | null | null | null |
"""Tag the sandbox for release, make source and doc tarballs.
Requires Python 2.6
Example of invocation (use to test the script):
python makerelease.py --force --retag --platform=msvc6,msvc71,msvc80,mingw -ublep 0.5.0 0.6.0-dev
Example of invocation when doing a release:
python makerelease.py 0.5.0 0.6.0-dev
"""
import os.path
import subprocess
import sys
import doxybuild
import subprocess
import xml.etree.ElementTree as ElementTree
import shutil
import urllib2
import tempfile
import os
import time
from devtools import antglob, fixeol, tarball
SVN_ROOT = 'https://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/'
SVN_TAG_ROOT = SVN_ROOT + 'tags/jsoncpp'
SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
SOURCEFORGE_PROJECT = 'jsoncpp'
def set_version( version ):
with open('version','wb') as f:
f.write( version.strip() )
def rmdir_if_exist( dir_path ):
if os.path.isdir( dir_path ):
shutil.rmtree( dir_path )
class SVNError(Exception):
pass
def svn_command( command, *args ):
cmd = ['svn', '--non-interactive', command] + list(args)
print 'Running:', ' '.join( cmd )
process = subprocess.Popen( cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode:
error = SVNError( 'SVN command failed:\n' + stdout )
error.returncode = process.returncode
raise error
return stdout
def check_no_pending_commit():
"""Checks that there is no pending commit in the sandbox."""
stdout = svn_command( 'status', '--xml' )
etree = ElementTree.fromstring( stdout )
msg = []
for entry in etree.getiterator( 'entry' ):
path = entry.get('path')
status = entry.find('wc-status').get('item')
if status != 'unversioned' and path != 'version':
msg.append( 'File "%s" has pending change (status="%s")' % (path, status) )
if msg:
        msg.insert(0, 'Pending changes to commit found in sandbox. Commit them first!' )
return '\n'.join( msg )
def svn_join_url( base_url, suffix ):
if not base_url.endswith('/'):
base_url += '/'
if suffix.startswith('/'):
suffix = suffix[1:]
return base_url + suffix
def svn_check_if_tag_exist( tag_url ):
"""Checks if a tag exist.
Returns: True if the tag exist, False otherwise.
"""
try:
list_stdout = svn_command( 'list', tag_url )
except SVNError, e:
if e.returncode != 1 or not str(e).find('tag_url'):
raise e
# otherwise ignore error, meaning tag does not exist
return False
return True
def svn_commit( message ):
"""Commit the sandbox, providing the specified comment.
"""
svn_command( 'ci', '-m', message )
def svn_tag_sandbox( tag_url, message ):
"""Makes a tag based on the sandbox revisions.
"""
svn_command( 'copy', '-m', message, '.', tag_url )
def svn_remove_tag( tag_url, message ):
"""Removes an existing tag.
"""
svn_command( 'delete', '-m', message, tag_url )
def svn_export( tag_url, export_dir ):
"""Exports the tag_url revision to export_dir.
    Target directory, including its parent, is created if it does not exist.
If the directory export_dir exist, it is deleted before export proceed.
"""
rmdir_if_exist( export_dir )
svn_command( 'export', tag_url, export_dir )
def fix_sources_eol( dist_dir ):
"""Set file EOL for tarball distribution.
"""
print 'Preparing exported source file EOL for distribution...'
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
win_sources = antglob.glob( dist_dir,
includes = '**/*.sln **/*.vcproj',
prune_dirs = prune_dirs )
unix_sources = antglob.glob( dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
prune_dirs = prune_dirs )
for path in win_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\r\n' )
for path in unix_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\n' )
def download( url, target_path ):
"""Download file represented by url to target_path.
"""
f = urllib2.urlopen( url )
try:
data = f.read()
finally:
f.close()
fout = open( target_path, 'wb' )
try:
fout.write( data )
finally:
fout.close()
def check_compile( distcheck_top_dir, platform ):
cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
print 'Running:', ' '.join( cmd )
log_path = os.path.join( distcheck_top_dir, 'build-%s.log' % platform )
flog = open( log_path, 'wb' )
try:
process = subprocess.Popen( cmd,
stdout=flog,
stderr=subprocess.STDOUT,
cwd=distcheck_top_dir )
stdout = process.communicate()[0]
status = (process.returncode == 0)
finally:
flog.close()
return (status, log_path)
def write_tempfile( content, **kwargs ):
fd, path = tempfile.mkstemp( **kwargs )
f = os.fdopen( fd, 'wt' )
try:
f.write( content )
finally:
f.close()
return path
class SFTPError(Exception):
pass
def run_sftp_batch( userhost, sftp, batch, retry=0 ):
path = write_tempfile( batch, suffix='.sftp', text=True )
# psftp -agent -C blep,jsoncpp@web.sourceforge.net -batch -b batch.sftp -bc
cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
error = None
for retry_index in xrange(0, max(1,retry)):
heading = retry_index == 0 and 'Running:' or 'Retrying:'
print heading, ' '.join( cmd )
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode != 0:
error = SFTPError( 'SFTP batch failed:\n' + stdout )
else:
break
if error:
raise error
return stdout
def sourceforge_web_synchro( sourceforge_project, doc_dir,
user=None, sftp='sftp' ):
"""Notes: does not synchronize sub-directory of doc-dir.
"""
userhost = '%s,%s@web.sourceforge.net' % (user, sourceforge_project)
stdout = run_sftp_batch( userhost, sftp, """
cd htdocs
dir
exit
""" )
existing_paths = set()
collect = 0
for line in stdout.split('\n'):
line = line.strip()
if not collect and line.endswith('> dir'):
collect = True
elif collect and line.endswith('> exit'):
break
elif collect == 1:
collect = 2
elif collect == 2:
path = line.strip().split()[-1:]
if path and path[0] not in ('.', '..'):
existing_paths.add( path[0] )
upload_paths = set( [os.path.basename(p) for p in antglob.glob( doc_dir )] )
paths_to_remove = existing_paths - upload_paths
if paths_to_remove:
print 'Removing the following file from web:'
print '\n'.join( paths_to_remove )
stdout = run_sftp_batch( userhost, sftp, """cd htdocs
rm %s
exit""" % ' '.join(paths_to_remove) )
print 'Uploading %d files:' % len(upload_paths)
batch_size = 10
upload_paths = list(upload_paths)
start_time = time.time()
for index in xrange(0,len(upload_paths),batch_size):
paths = upload_paths[index:index+batch_size]
file_per_sec = (time.time() - start_time) / (index+1)
remaining_files = len(upload_paths) - index
remaining_sec = file_per_sec * remaining_files
print '%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec)
run_sftp_batch( userhost, sftp, """cd htdocs
lcd %s
mput %s
exit""" % (doc_dir, ' '.join(paths) ), retry=3 )
def sourceforge_release_tarball( sourceforge_project, paths, user=None, sftp='sftp' ):
userhost = '%s,%s@frs.sourceforge.net' % (user, sourceforge_project)
run_sftp_batch( userhost, sftp, """
mput %s
exit
""" % (' '.join(paths),) )
def main():
usage = """%prog release_version next_dev_version
Update 'version' file to release_version and commit.
Generates the document tarball.
Tags the sandbox revision with release_version.
Update 'version' file to next_dev_version and commit.
Performs an svn export of the release version tag, and builds a source tarball.
Must be started in the project top directory.
Warning: --force should only be used when developing/testing the release script.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('--dot', dest="dot_path", action='store', default=doxybuild.find_program('dot'),
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=doxybuild.find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
parser.add_option('--force', dest="ignore_pending_commit", action='store_true', default=False,
help="""Ignore pending commit. [Default: %default]""")
parser.add_option('--retag', dest="retag_release", action='store_true', default=False,
help="""Overwrite release existing tag if it exist. [Default: %default]""")
parser.add_option('-p', '--platforms', dest="platforms", action='store', default='',
help="""Comma separated list of platform passed to scons for build check.""")
parser.add_option('--no-test', dest="no_test", action='store_true', default=False,
help="""Skips build check.""")
parser.add_option('--no-web', dest="no_web", action='store_true', default=False,
help="""Do not update web site.""")
parser.add_option('-u', '--upload-user', dest="user", action='store',
help="""Sourceforge user for SFTP documentation upload.""")
parser.add_option('--sftp', dest='sftp', action='store', default=doxybuild.find_program('psftp', 'sftp'),
help="""Path of the SFTP compatible binary used to upload the documentation.""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 2:
parser.error( 'release_version missing on command-line.' )
release_version = args[0]
next_version = args[1]
if not options.platforms and not options.no_test:
parser.error( 'You must specify either --platform or --no-test option.' )
if options.ignore_pending_commit:
msg = ''
else:
msg = check_no_pending_commit()
if not msg:
print 'Setting version to', release_version
set_version( release_version )
svn_commit( 'Release ' + release_version )
tag_url = svn_join_url( SVN_TAG_ROOT, release_version )
if svn_check_if_tag_exist( tag_url ):
if options.retag_release:
svn_remove_tag( tag_url, 'Overwriting previous tag' )
else:
print 'Aborting, tag %s already exist. Use --retag to overwrite it!' % tag_url
sys.exit( 1 )
svn_tag_sandbox( tag_url, 'Release ' + release_version )
print 'Generated doxygen document...'
## doc_dirname = r'jsoncpp-api-html-0.5.0'
## doc_tarball_path = r'e:\prg\vc\Lib\jsoncpp-trunk\dist\jsoncpp-api-html-0.5.0.tar.gz'
doc_tarball_path, doc_dirname = doxybuild.build_doc( options, make_release=True )
doc_distcheck_dir = 'dist/doccheck'
tarball.decompress( doc_tarball_path, doc_distcheck_dir )
doc_distcheck_top_dir = os.path.join( doc_distcheck_dir, doc_dirname )
export_dir = 'dist/export'
svn_export( tag_url, export_dir )
fix_sources_eol( export_dir )
source_dir = 'jsoncpp-src-' + release_version
source_tarball_path = 'dist/%s.tar.gz' % source_dir
print 'Generating source tarball to', source_tarball_path
tarball.make_tarball( source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir )
# Decompress source tarball, download and install scons-local
distcheck_dir = 'dist/distcheck'
distcheck_top_dir = distcheck_dir + '/' + source_dir
print 'Decompressing source tarball to', distcheck_dir
rmdir_if_exist( distcheck_dir )
tarball.decompress( source_tarball_path, distcheck_dir )
scons_local_path = 'dist/scons-local.tar.gz'
print 'Downloading scons-local to', scons_local_path
download( SCONS_LOCAL_URL, scons_local_path )
print 'Decompressing scons-local to', distcheck_top_dir
tarball.decompress( scons_local_path, distcheck_top_dir )
# Run compilation
print 'Compiling decompressed tarball'
all_build_status = True
for platform in options.platforms.split(','):
print 'Testing platform:', platform
build_status, log_path = check_compile( distcheck_top_dir, platform )
print 'see build log:', log_path
print build_status and '=> ok' or '=> FAILED'
all_build_status = all_build_status and build_status
if not build_status:
print 'Testing failed on at least one platform, aborting...'
svn_remove_tag( tag_url, 'Removing tag due to failed testing' )
sys.exit(1)
if options.user:
if not options.no_web:
print 'Uploading documentation using user', options.user
sourceforge_web_synchro( SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp )
print 'Completed documentation upload'
print 'Uploading source and documentation tarballs for release using user', options.user
sourceforge_release_tarball( SOURCEFORGE_PROJECT,
[source_tarball_path, doc_tarball_path],
user=options.user, sftp=options.sftp )
print 'Source and doc release tarballs uploaded'
else:
            print 'No upload user specified. Web site and download tarballs were not uploaded.'
print 'Tarball can be found at:', doc_tarball_path
# Set next version number and commit
set_version( next_version )
svn_commit( 'Released ' + release_version )
else:
sys.stderr.write( msg + '\n' )
if __name__ == '__main__':
main()
| 41.059621
| 124
| 0.620883
|
"""Tag the sandbox for release, make source and doc tarballs.
Requires Python 2.6
Example of invocation (use to test the script):
python makerelease.py --force --retag --platform=msvc6,msvc71,msvc80,mingw -ublep 0.5.0 0.6.0-dev
Example of invocation when doing a release:
python makerelease.py 0.5.0 0.6.0-dev
"""
import os.path
import subprocess
import sys
import doxybuild
import subprocess
import xml.etree.ElementTree as ElementTree
import shutil
import urllib2
import tempfile
import os
import time
from devtools import antglob, fixeol, tarball
SVN_ROOT = 'https://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/'
SVN_TAG_ROOT = SVN_ROOT + 'tags/jsoncpp'
SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
SOURCEFORGE_PROJECT = 'jsoncpp'
def set_version( version ):
with open('version','wb') as f:
f.write( version.strip() )
def rmdir_if_exist( dir_path ):
if os.path.isdir( dir_path ):
shutil.rmtree( dir_path )
class SVNError(Exception):
pass
def svn_command( command, *args ):
cmd = ['svn', '--non-interactive', command] + list(args)
print 'Running:', ' '.join( cmd )
process = subprocess.Popen( cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode:
error = SVNError( 'SVN command failed:\n' + stdout )
error.returncode = process.returncode
raise error
return stdout
def check_no_pending_commit():
"""Checks that there is no pending commit in the sandbox."""
stdout = svn_command( 'status', '--xml' )
etree = ElementTree.fromstring( stdout )
msg = []
for entry in etree.getiterator( 'entry' ):
path = entry.get('path')
status = entry.find('wc-status').get('item')
if status != 'unversioned' and path != 'version':
msg.append( 'File "%s" has pending change (status="%s")' % (path, status) )
if msg:
msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!' )
return '\n'.join( msg )
def svn_join_url( base_url, suffix ):
if not base_url.endswith('/'):
base_url += '/'
if suffix.startswith('/'):
suffix = suffix[1:]
return base_url + suffix
def svn_check_if_tag_exist( tag_url ):
    """Checks if a tag exists.
    Returns: True if the tag exists, False otherwise.
"""
try:
list_stdout = svn_command( 'list', tag_url )
except SVNError, e:
if e.returncode != 1 or not str(e).find('tag_url'):
raise e
return False
return True
def svn_commit( message ):
"""Commit the sandbox, providing the specified comment.
"""
svn_command( 'ci', '-m', message )
def svn_tag_sandbox( tag_url, message ):
"""Makes a tag based on the sandbox revisions.
"""
svn_command( 'copy', '-m', message, '.', tag_url )
def svn_remove_tag( tag_url, message ):
"""Removes an existing tag.
"""
svn_command( 'delete', '-m', message, tag_url )
def svn_export( tag_url, export_dir ):
"""Exports the tag_url revision to export_dir.
Target directory, including its parent is created if it does not exist.
If the directory export_dir exist, it is deleted before export proceed.
"""
rmdir_if_exist( export_dir )
svn_command( 'export', tag_url, export_dir )
def fix_sources_eol( dist_dir ):
"""Set file EOL for tarball distribution.
"""
print 'Preparing exported source file EOL for distribution...'
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
win_sources = antglob.glob( dist_dir,
includes = '**/*.sln **/*.vcproj',
prune_dirs = prune_dirs )
unix_sources = antglob.glob( dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
prune_dirs = prune_dirs )
for path in win_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\r\n' )
for path in unix_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\n' )
def download( url, target_path ):
"""Download file represented by url to target_path.
"""
f = urllib2.urlopen( url )
try:
data = f.read()
finally:
f.close()
fout = open( target_path, 'wb' )
try:
fout.write( data )
finally:
fout.close()
def check_compile( distcheck_top_dir, platform ):
cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
print 'Running:', ' '.join( cmd )
log_path = os.path.join( distcheck_top_dir, 'build-%s.log' % platform )
flog = open( log_path, 'wb' )
try:
process = subprocess.Popen( cmd,
stdout=flog,
stderr=subprocess.STDOUT,
cwd=distcheck_top_dir )
stdout = process.communicate()[0]
status = (process.returncode == 0)
finally:
flog.close()
return (status, log_path)
def write_tempfile( content, **kwargs ):
fd, path = tempfile.mkstemp( **kwargs )
f = os.fdopen( fd, 'wt' )
try:
f.write( content )
finally:
f.close()
return path
class SFTPError(Exception):
pass
def run_sftp_batch( userhost, sftp, batch, retry=0 ):
path = write_tempfile( batch, suffix='.sftp', text=True )
cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
error = None
for retry_index in xrange(0, max(1,retry)):
heading = retry_index == 0 and 'Running:' or 'Retrying:'
print heading, ' '.join( cmd )
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode != 0:
error = SFTPError( 'SFTP batch failed:\n' + stdout )
else:
break
if error:
raise error
return stdout
def sourceforge_web_synchro( sourceforge_project, doc_dir,
user=None, sftp='sftp' ):
"""Notes: does not synchronize sub-directory of doc-dir.
"""
userhost = '%s,%s@web.sourceforge.net' % (user, sourceforge_project)
stdout = run_sftp_batch( userhost, sftp, """
cd htdocs
dir
exit
""" )
existing_paths = set()
collect = 0
for line in stdout.split('\n'):
line = line.strip()
if not collect and line.endswith('> dir'):
collect = True
elif collect and line.endswith('> exit'):
break
elif collect == 1:
collect = 2
elif collect == 2:
path = line.strip().split()[-1:]
if path and path[0] not in ('.', '..'):
existing_paths.add( path[0] )
upload_paths = set( [os.path.basename(p) for p in antglob.glob( doc_dir )] )
paths_to_remove = existing_paths - upload_paths
if paths_to_remove:
print 'Removing the following file from web:'
print '\n'.join( paths_to_remove )
stdout = run_sftp_batch( userhost, sftp, """cd htdocs
rm %s
exit""" % ' '.join(paths_to_remove) )
print 'Uploading %d files:' % len(upload_paths)
batch_size = 10
upload_paths = list(upload_paths)
start_time = time.time()
for index in xrange(0,len(upload_paths),batch_size):
paths = upload_paths[index:index+batch_size]
file_per_sec = (time.time() - start_time) / (index+1)
remaining_files = len(upload_paths) - index
remaining_sec = file_per_sec * remaining_files
print '%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec)
run_sftp_batch( userhost, sftp, """cd htdocs
lcd %s
mput %s
exit""" % (doc_dir, ' '.join(paths) ), retry=3 )
def sourceforge_release_tarball( sourceforge_project, paths, user=None, sftp='sftp' ):
userhost = '%s,%s@frs.sourceforge.net' % (user, sourceforge_project)
run_sftp_batch( userhost, sftp, """
mput %s
exit
""" % (' '.join(paths),) )
def main():
usage = """%prog release_version next_dev_version
Update 'version' file to release_version and commit.
Generates the document tarball.
Tags the sandbox revision with release_version.
Update 'version' file to next_dev_version and commit.
Performs an svn export of the tagged release version, and builds a source tarball.
Must be started in the project top directory.
Warning: --force should only be used when developing/testing the release script.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('--dot', dest="dot_path", action='store', default=doxybuild.find_program('dot'),
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=doxybuild.find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
parser.add_option('--force', dest="ignore_pending_commit", action='store_true', default=False,
help="""Ignore pending commit. [Default: %default]""")
    parser.add_option('--retag', dest="retag_release", action='store_true', default=False,
                      help="""Overwrite an existing release tag if it exists. [Default: %default]""")
parser.add_option('-p', '--platforms', dest="platforms", action='store', default='',
help="""Comma separated list of platform passed to scons for build check.""")
parser.add_option('--no-test', dest="no_test", action='store_true', default=False,
help="""Skips build check.""")
parser.add_option('--no-web', dest="no_web", action='store_true', default=False,
help="""Do not update web site.""")
parser.add_option('-u', '--upload-user', dest="user", action='store',
help="""Sourceforge user for SFTP documentation upload.""")
parser.add_option('--sftp', dest='sftp', action='store', default=doxybuild.find_program('psftp', 'sftp'),
help="""Path of the SFTP compatible binary used to upload the documentation.""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 2:
parser.error( 'release_version missing on command-line.' )
release_version = args[0]
next_version = args[1]
if not options.platforms and not options.no_test:
parser.error( 'You must specify either --platform or --no-test option.' )
if options.ignore_pending_commit:
msg = ''
else:
msg = check_no_pending_commit()
if not msg:
print 'Setting version to', release_version
set_version( release_version )
svn_commit( 'Release ' + release_version )
tag_url = svn_join_url( SVN_TAG_ROOT, release_version )
if svn_check_if_tag_exist( tag_url ):
if options.retag_release:
svn_remove_tag( tag_url, 'Overwriting previous tag' )
else:
                print 'Aborting, tag %s already exists. Use --retag to overwrite it!' % tag_url
sys.exit( 1 )
svn_tag_sandbox( tag_url, 'Release ' + release_version )
print 'Generated doxygen document...'
tarball.decompress( doc_tarball_path, doc_distcheck_dir )
doc_distcheck_top_dir = os.path.join( doc_distcheck_dir, doc_dirname )
export_dir = 'dist/export'
svn_export( tag_url, export_dir )
fix_sources_eol( export_dir )
source_dir = 'jsoncpp-src-' + release_version
source_tarball_path = 'dist/%s.tar.gz' % source_dir
print 'Generating source tarball to', source_tarball_path
tarball.make_tarball( source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir )
distcheck_dir = 'dist/distcheck'
distcheck_top_dir = distcheck_dir + '/' + source_dir
print 'Decompressing source tarball to', distcheck_dir
rmdir_if_exist( distcheck_dir )
tarball.decompress( source_tarball_path, distcheck_dir )
scons_local_path = 'dist/scons-local.tar.gz'
print 'Downloading scons-local to', scons_local_path
download( SCONS_LOCAL_URL, scons_local_path )
print 'Decompressing scons-local to', distcheck_top_dir
tarball.decompress( scons_local_path, distcheck_top_dir )
print 'Compiling decompressed tarball'
all_build_status = True
for platform in options.platforms.split(','):
print 'Testing platform:', platform
build_status, log_path = check_compile( distcheck_top_dir, platform )
print 'see build log:', log_path
print build_status and '=> ok' or '=> FAILED'
all_build_status = all_build_status and build_status
if not build_status:
print 'Testing failed on at least one platform, aborting...'
svn_remove_tag( tag_url, 'Removing tag due to failed testing' )
sys.exit(1)
if options.user:
if not options.no_web:
print 'Uploading documentation using user', options.user
sourceforge_web_synchro( SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp )
print 'Completed documentation upload'
print 'Uploading source and documentation tarballs for release using user', options.user
sourceforge_release_tarball( SOURCEFORGE_PROJECT,
[source_tarball_path, doc_tarball_path],
user=options.user, sftp=options.sftp )
print 'Source and doc release tarballs uploaded'
else:
            print 'No upload user specified. Web site and download tarball were not uploaded.'
print 'Tarball can be found at:', doc_tarball_path
set_version( next_version )
svn_commit( 'Released ' + release_version )
else:
sys.stderr.write( msg + '\n' )
if __name__ == '__main__':
main()
| false
| true
|
7906de69e21c5d928080458b1e43711fa9c55e23
| 12,026
|
py
|
Python
|
modin/config/envvars.py
|
atomicai/modin
|
ecaab1baafbf7e94aeb59aab8dd7fb48b687b4a3
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/config/envvars.py
|
atomicai/modin
|
ecaab1baafbf7e94aeb59aab8dd7fb48b687b4a3
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/config/envvars.py
|
atomicai/modin
|
ecaab1baafbf7e94aeb59aab8dd7fb48b687b4a3
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses Modin configs originated from environment variables."""
import os
import sys
from textwrap import dedent
import warnings
from packaging import version
import secrets
from .pubsub import Parameter, _TYPE_PARAMS, ExactStr, ValueSource
class EnvironmentVariable(Parameter, type=str, abstract=True):
"""Base class for environment variables-based configuration."""
varname: str = None
@classmethod
def _get_raw_from_config(cls) -> str:
"""
Read the value from environment variable.
Returns
-------
str
Config raw value.
Raises
------
KeyError
If value is absent.
"""
return os.environ[cls.varname]
@classmethod
def get_help(cls) -> str:
"""
Generate user-presentable help for the config.
Returns
-------
str
"""
help = f"{cls.varname}: {dedent(cls.__doc__ or 'Unknown').strip()}\n\tProvide {_TYPE_PARAMS[cls.type].help}"
if cls.choices:
help += f" (valid examples are: {', '.join(str(c) for c in cls.choices)})"
return help
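# Illustrative sketch (hypothetical, not an actual Modin config): a new
# environment-backed option follows the same pattern as the classes below --
# subclass EnvironmentVariable with a concrete ``type``, point ``varname`` at
# the variable to read, and optionally provide a ``default``.
class _ExampleTimeout(EnvironmentVariable, type=int):
    """Hypothetical timeout (in seconds) read from $MODIN_EXAMPLE_TIMEOUT."""
    varname = "MODIN_EXAMPLE_TIMEOUT"
    default = 30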
class IsDebug(EnvironmentVariable, type=bool):
"""Force Modin engine to be "Python" unless specified by $MODIN_ENGINE."""
varname = "MODIN_DEBUG"
class Engine(EnvironmentVariable, type=str):
"""Distribution engine to run queries by."""
varname = "MODIN_ENGINE"
choices = ("Ray", "Dask", "Python", "Native")
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
str
"""
if IsDebug.get():
return "Python"
try:
import ray
except ImportError:
pass
else:
if version.parse(ray.__version__) < version.parse("1.4.0"):
raise ImportError(
"Please `pip install modin[ray]` to install compatible Ray version."
)
return "Ray"
try:
import dask
import distributed
except ImportError:
pass
else:
if version.parse(dask.__version__) < version.parse(
"2.22.0"
) or version.parse(distributed.__version__) < version.parse("2.22.0"):
raise ImportError(
"Please `pip install modin[dask]` to install compatible Dask version."
)
return "Dask"
try:
import omniscidbe # noqa
except ImportError:
try:
import dbe # noqa
except ImportError:
pass
else:
return "Native"
else:
return "Native"
raise ImportError(
"Please refer to installation documentation page to install an engine"
)
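# Resolution sketch (illustration only): with $MODIN_ENGINE unset, Engine.get()
# falls back to _get_default() above, picking "Ray" if a compatible Ray is
# importable, then trying Dask, then the OmniSci ("Native") engine. Exporting
# MODIN_ENGINE=Dask, or calling Engine.put("Dask") before first use, overrides
# that auto-detection.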
class Backend(EnvironmentVariable, type=str):
"""Engine to run on a single node of distribution."""
varname = "MODIN_BACKEND"
default = "Pandas"
choices = ("Pandas", "OmniSci", "Pyarrow", "Cudf")
class IsExperimental(EnvironmentVariable, type=bool):
    """Whether to turn on experimental features."""
varname = "MODIN_EXPERIMENTAL"
class IsRayCluster(EnvironmentVariable, type=bool):
"""Whether Modin is running on pre-initialized Ray cluster."""
varname = "MODIN_RAY_CLUSTER"
class RayRedisAddress(EnvironmentVariable, type=ExactStr):
"""Redis address to connect to when running in Ray cluster."""
varname = "MODIN_REDIS_ADDRESS"
class RayRedisPassword(EnvironmentVariable, type=ExactStr):
"""What password to use for connecting to Redis."""
varname = "MODIN_REDIS_PASSWORD"
default = secrets.token_hex(32)
class CpuCount(EnvironmentVariable, type=int):
"""How many CPU cores to use during initialization of the Modin engine."""
varname = "MODIN_CPUS"
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
int
"""
import multiprocessing
return multiprocessing.cpu_count()
class GpuCount(EnvironmentVariable, type=int):
    """How many GPU devices to utilize across the whole distribution."""
varname = "MODIN_GPUS"
class Memory(EnvironmentVariable, type=int):
"""
    How much memory (in bytes) to give to an execution engine.
Notes
-----
* In Ray case: the amount of memory to start the Plasma object store with.
* In Dask case: the amount of memory that is given to each worker depending on CPUs used.
"""
varname = "MODIN_MEMORY"
class NPartitions(EnvironmentVariable, type=int):
"""How many partitions to use for a Modin DataFrame (along each axis)."""
varname = "MODIN_NPARTITIONS"
@classmethod
def _put(cls, value):
"""
Put specific value if NPartitions wasn't set by a user yet.
Parameters
----------
value : int
Config value to set.
Notes
-----
This method is used to set NPartitions from cluster resources internally
and should not be called by a user.
"""
if cls.get_value_source() == ValueSource.DEFAULT:
cls.put(value)
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
int
"""
if Backend.get() == "Cudf":
return GpuCount.get()
else:
return CpuCount.get()
class SocksProxy(EnvironmentVariable, type=ExactStr):
"""SOCKS proxy address if it is needed for SSH to work."""
varname = "MODIN_SOCKS_PROXY"
class DoLogRpyc(EnvironmentVariable, type=bool):
"""Whether to gather RPyC logs (applicable for remote context)."""
varname = "MODIN_LOG_RPYC"
class DoTraceRpyc(EnvironmentVariable, type=bool):
"""Whether to trace RPyC calls (applicable for remote context)."""
varname = "MODIN_TRACE_RPYC"
class OmnisciFragmentSize(EnvironmentVariable, type=int):
"""How big a fragment in OmniSci should be when creating a table (in rows)."""
varname = "MODIN_OMNISCI_FRAGMENT_SIZE"
class DoUseCalcite(EnvironmentVariable, type=bool):
"""Whether to use Calcite for OmniSci queries execution."""
varname = "MODIN_USE_CALCITE"
default = True
class TestDatasetSize(EnvironmentVariable, type=str):
"""Dataset size for running some tests."""
varname = "MODIN_TEST_DATASET_SIZE"
choices = ("Small", "Normal", "Big")
class TestRayClient(EnvironmentVariable, type=bool):
"""Set to true to start and connect Ray client before a testing session starts."""
varname = "MODIN_TEST_RAY_CLIENT"
default = False
class TrackFileLeaks(EnvironmentVariable, type=bool):
"""Whether to track for open file handles leakage during testing."""
varname = "MODIN_TEST_TRACK_FILE_LEAKS"
# Turn off tracking on Windows by default because
# psutil's open_files() can be extremely slow on Windows (up to adding a few hours).
# see https://github.com/giampaolo/psutil/pull/597
default = sys.platform != "win32"
class AsvImplementation(EnvironmentVariable, type=ExactStr):
    """Allows selecting the library that we will use for testing performance."""
varname = "MODIN_ASV_USE_IMPL"
choices = ("modin", "pandas")
default = "modin"
class AsvDataSizeConfig(EnvironmentVariable, type=ExactStr):
    """Allows overriding the default size of data (shapes)."""
varname = "MODIN_ASV_DATASIZE_CONFIG"
default = None
class ProgressBar(EnvironmentVariable, type=bool):
"""Whether or not to show the progress bar."""
varname = "MODIN_PROGRESS_BAR"
default = False
@classmethod
def enable(cls):
"""Enable ``ProgressBar`` feature."""
cls.put(True)
@classmethod
def disable(cls):
"""Disable ``ProgressBar`` feature."""
cls.put(False)
@classmethod
def put(cls, value):
"""
Set ``ProgressBar`` value only if synchronous benchmarking is disabled.
Parameters
----------
value : bool
Config value to set.
"""
if value and BenchmarkMode.get():
raise ValueError("ProgressBar isn't compatible with BenchmarkMode")
super().put(value)
class BenchmarkMode(EnvironmentVariable, type=bool):
"""Whether or not to perform computations synchronously."""
varname = "MODIN_BENCHMARK_MODE"
default = False
@classmethod
def put(cls, value):
"""
Set ``BenchmarkMode`` value only if progress bar feature is disabled.
Parameters
----------
value : bool
Config value to set.
"""
if value and ProgressBar.get():
raise ValueError("BenchmarkMode isn't compatible with ProgressBar")
super().put(value)
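# Usage sketch (illustration only, not executed here): ProgressBar.enable() is
# shorthand for ProgressBar.put(True); because the two ``put`` overrides above
# guard each other, enabling ProgressBar while BenchmarkMode is on (or the
# other way around) raises ValueError instead of silently combining the modes.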
class PersistentPickle(EnvironmentVariable, type=bool):
    """Whether serialization should be persistent."""
varname = "MODIN_PERSISTENT_PICKLE"
# When set to off, it allows faster serialization which is only
# valid in current run (i.e. useless for saving to disk).
# When set to on, Modin objects could be saved to disk and loaded
# but serialization/deserialization could take more time.
default = False
class OmnisciLaunchParameters(EnvironmentVariable, type=dict):
"""
Additional command line options for the OmniSci engine.
Please visit OmniSci documentation for the description of available parameters:
https://docs.omnisci.com/installation-and-configuration/config-parameters#configuration-parameters-for-omniscidb
"""
varname = "MODIN_OMNISCI_LAUNCH_PARAMETERS"
default = {
"enable_union": 1,
"enable_columnar_output": 1,
"enable_lazy_fetch": 0,
"null_div_by_zero": 1,
"enable_watchdog": 0,
}
@classmethod
def get(self):
"""
Get the resulted command-line options.
Decode and merge specified command-line options with the default one.
Returns
-------
dict
Decoded and verified config value.
"""
custom_parameters = super().get()
result = self.default.copy()
result.update(
{key.replace("-", "_"): value for key, value in custom_parameters.items()}
)
return result
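# Merge-semantics sketch (illustration only; decoding of the raw environment
# string into a dict is handled by the ``dict`` type handler in .pubsub and is
# not shown here): user-supplied keys override the defaults above and dashes
# are normalized, so {"enable-watchdog": 1} ends up as "enable_watchdog": 1 on
# top of OmnisciLaunchParameters.default.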
def _check_vars():
"""
Check validity of environment variables.
Look out for any environment variables that start with "MODIN_" prefix
that are unknown - they might be a typo, so warn a user.
"""
valid_names = {
obj.varname
for obj in globals().values()
if isinstance(obj, type)
and issubclass(obj, EnvironmentVariable)
and not obj.is_abstract
}
found_names = {name for name in os.environ if name.startswith("MODIN_")}
unknown = found_names - valid_names
if unknown:
warnings.warn(
f"Found unknown environment variable{'s' if len(unknown) > 1 else ''},"
f" please check {'their' if len(unknown) > 1 else 'its'} spelling: "
+ ", ".join(sorted(unknown))
)
_check_vars()
| 27.837963
| 116
| 0.633378
|
import os
import sys
from textwrap import dedent
import warnings
from packaging import version
import secrets
from .pubsub import Parameter, _TYPE_PARAMS, ExactStr, ValueSource
class EnvironmentVariable(Parameter, type=str, abstract=True):
varname: str = None
@classmethod
def _get_raw_from_config(cls) -> str:
return os.environ[cls.varname]
@classmethod
def get_help(cls) -> str:
help = f"{cls.varname}: {dedent(cls.__doc__ or 'Unknown').strip()}\n\tProvide {_TYPE_PARAMS[cls.type].help}"
if cls.choices:
help += f" (valid examples are: {', '.join(str(c) for c in cls.choices)})"
return help
class IsDebug(EnvironmentVariable, type=bool):
varname = "MODIN_DEBUG"
class Engine(EnvironmentVariable, type=str):
varname = "MODIN_ENGINE"
choices = ("Ray", "Dask", "Python", "Native")
@classmethod
def _get_default(cls):
if IsDebug.get():
return "Python"
try:
import ray
except ImportError:
pass
else:
if version.parse(ray.__version__) < version.parse("1.4.0"):
raise ImportError(
"Please `pip install modin[ray]` to install compatible Ray version."
)
return "Ray"
try:
import dask
import distributed
except ImportError:
pass
else:
if version.parse(dask.__version__) < version.parse(
"2.22.0"
) or version.parse(distributed.__version__) < version.parse("2.22.0"):
raise ImportError(
"Please `pip install modin[dask]` to install compatible Dask version."
)
return "Dask"
try:
import omniscidbe
except ImportError:
try:
import dbe
except ImportError:
pass
else:
return "Native"
else:
return "Native"
raise ImportError(
"Please refer to installation documentation page to install an engine"
)
class Backend(EnvironmentVariable, type=str):
varname = "MODIN_BACKEND"
default = "Pandas"
choices = ("Pandas", "OmniSci", "Pyarrow", "Cudf")
class IsExperimental(EnvironmentVariable, type=bool):
varname = "MODIN_EXPERIMENTAL"
class IsRayCluster(EnvironmentVariable, type=bool):
varname = "MODIN_RAY_CLUSTER"
class RayRedisAddress(EnvironmentVariable, type=ExactStr):
varname = "MODIN_REDIS_ADDRESS"
class RayRedisPassword(EnvironmentVariable, type=ExactStr):
varname = "MODIN_REDIS_PASSWORD"
default = secrets.token_hex(32)
class CpuCount(EnvironmentVariable, type=int):
varname = "MODIN_CPUS"
@classmethod
def _get_default(cls):
import multiprocessing
return multiprocessing.cpu_count()
class GpuCount(EnvironmentVariable, type=int):
varname = "MODIN_GPUS"
class Memory(EnvironmentVariable, type=int):
varname = "MODIN_MEMORY"
class NPartitions(EnvironmentVariable, type=int):
varname = "MODIN_NPARTITIONS"
@classmethod
def _put(cls, value):
if cls.get_value_source() == ValueSource.DEFAULT:
cls.put(value)
@classmethod
def _get_default(cls):
if Backend.get() == "Cudf":
return GpuCount.get()
else:
return CpuCount.get()
class SocksProxy(EnvironmentVariable, type=ExactStr):
varname = "MODIN_SOCKS_PROXY"
class DoLogRpyc(EnvironmentVariable, type=bool):
varname = "MODIN_LOG_RPYC"
class DoTraceRpyc(EnvironmentVariable, type=bool):
varname = "MODIN_TRACE_RPYC"
class OmnisciFragmentSize(EnvironmentVariable, type=int):
varname = "MODIN_OMNISCI_FRAGMENT_SIZE"
class DoUseCalcite(EnvironmentVariable, type=bool):
varname = "MODIN_USE_CALCITE"
default = True
class TestDatasetSize(EnvironmentVariable, type=str):
varname = "MODIN_TEST_DATASET_SIZE"
choices = ("Small", "Normal", "Big")
class TestRayClient(EnvironmentVariable, type=bool):
varname = "MODIN_TEST_RAY_CLIENT"
default = False
class TrackFileLeaks(EnvironmentVariable, type=bool):
varname = "MODIN_TEST_TRACK_FILE_LEAKS"
# see https://github.com/giampaolo/psutil/pull/597
default = sys.platform != "win32"
class AsvImplementation(EnvironmentVariable, type=ExactStr):
varname = "MODIN_ASV_USE_IMPL"
choices = ("modin", "pandas")
default = "modin"
class AsvDataSizeConfig(EnvironmentVariable, type=ExactStr):
varname = "MODIN_ASV_DATASIZE_CONFIG"
default = None
class ProgressBar(EnvironmentVariable, type=bool):
varname = "MODIN_PROGRESS_BAR"
default = False
@classmethod
def enable(cls):
cls.put(True)
@classmethod
def disable(cls):
cls.put(False)
@classmethod
def put(cls, value):
if value and BenchmarkMode.get():
raise ValueError("ProgressBar isn't compatible with BenchmarkMode")
super().put(value)
class BenchmarkMode(EnvironmentVariable, type=bool):
varname = "MODIN_BENCHMARK_MODE"
default = False
@classmethod
def put(cls, value):
if value and ProgressBar.get():
raise ValueError("BenchmarkMode isn't compatible with ProgressBar")
super().put(value)
class PersistentPickle(EnvironmentVariable, type=bool):
varname = "MODIN_PERSISTENT_PICKLE"
# When set to off, it allows faster serialization which is only
# valid in current run (i.e. useless for saving to disk).
# When set to on, Modin objects could be saved to disk and loaded
# but serialization/deserialization could take more time.
default = False
class OmnisciLaunchParameters(EnvironmentVariable, type=dict):
varname = "MODIN_OMNISCI_LAUNCH_PARAMETERS"
default = {
"enable_union": 1,
"enable_columnar_output": 1,
"enable_lazy_fetch": 0,
"null_div_by_zero": 1,
"enable_watchdog": 0,
}
@classmethod
def get(self):
custom_parameters = super().get()
result = self.default.copy()
result.update(
{key.replace("-", "_"): value for key, value in custom_parameters.items()}
)
return result
def _check_vars():
valid_names = {
obj.varname
for obj in globals().values()
if isinstance(obj, type)
and issubclass(obj, EnvironmentVariable)
and not obj.is_abstract
}
found_names = {name for name in os.environ if name.startswith("MODIN_")}
unknown = found_names - valid_names
if unknown:
warnings.warn(
f"Found unknown environment variable{'s' if len(unknown) > 1 else ''},"
f" please check {'their' if len(unknown) > 1 else 'its'} spelling: "
+ ", ".join(sorted(unknown))
)
_check_vars()
| true
| true
|
7906ded99a8d5f5babeeb0b290d7fff6e133a906
| 5,263
|
py
|
Python
|
tests/conftest.py
|
snebel29/kubernetes-ingress
|
a31cd87288fa102ef9f094da7ecd371e9b36c680
|
[
"Apache-2.0"
] | 1
|
2022-03-02T19:05:19.000Z
|
2022-03-02T19:05:19.000Z
|
tests/conftest.py
|
snebel29/kubernetes-ingress
|
a31cd87288fa102ef9f094da7ecd371e9b36c680
|
[
"Apache-2.0"
] | 228
|
2021-02-06T17:28:21.000Z
|
2022-03-31T02:08:34.000Z
|
tests/conftest.py
|
snebel29/kubernetes-ingress
|
a31cd87288fa102ef9f094da7ecd371e9b36c680
|
[
"Apache-2.0"
] | null | null | null |
"""Describe overall framework configuration."""
import os
import pytest
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from settings import (
DEFAULT_IMAGE,
DEFAULT_PULL_POLICY,
DEFAULT_IC_TYPE,
DEFAULT_SERVICE,
DEFAULT_DEPLOYMENT_TYPE,
NUM_REPLICAS,
BATCH_START,
BATCH_RESOURCES,
)
from suite.resources_utils import get_first_pod_name
def pytest_addoption(parser) -> None:
"""Get cli-arguments.
:param parser: pytest parser
:return:
"""
parser.addoption(
"--context",
action="store",
default="",
help="The context to use in the kubeconfig file.",
)
parser.addoption(
"--image",
action="store",
default=DEFAULT_IMAGE,
help="The Ingress Controller image.",
)
parser.addoption(
"--image-pull-policy",
action="store",
default=DEFAULT_PULL_POLICY,
help="The pull policy of the Ingress Controller image.",
)
parser.addoption(
"--deployment-type",
action="store",
default=DEFAULT_DEPLOYMENT_TYPE,
help="The type of the IC deployment: deployment or daemon-set.",
)
parser.addoption(
"--ic-type",
action="store",
default=DEFAULT_IC_TYPE,
help="The type of the Ingress Controller: nginx-ingress or nginx-ingress-plus.",
)
parser.addoption(
"--service",
action="store",
default=DEFAULT_SERVICE,
help="The type of the Ingress Controller service: nodeport or loadbalancer.",
)
parser.addoption(
"--replicas",
action="store",
default=NUM_REPLICAS,
help="Number of replica pods for type deployment",
)
parser.addoption(
"--node-ip",
action="store",
help="The public IP of a cluster node. Not required if you use the loadbalancer service (see --service argument).",
)
parser.addoption(
"--kubeconfig",
action="store",
default=os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION),
help="An absolute path to a kubeconfig file.",
)
parser.addoption(
"--show-ic-logs",
action="store",
default="no",
help="Show IC logs in stdout on test failure",
)
parser.addoption(
"--batch-start",
action="store",
        default=BATCH_START,
        help="Run tests for pod restarts with multiple resources deployed (Ingress/VS): True/False",
)
parser.addoption(
"--batch-resources",
action="store",
default=BATCH_RESOURCES,
help="Number of VS/Ingress resources to deploy",
)
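# Usage sketch (illustration only): options registered above are read back
# through the pytest config object, e.g. inside a fixture one would call
#   image = request.config.getoption("--image")
# just as pytest_collection_modifyitems below does with config.getoption().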
# import fixtures into pytest global namespace
pytest_plugins = ["suite.fixtures"]
def pytest_collection_modifyitems(config, items) -> None:
"""
    Skip tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
    Skip tests marked with '@pytest.mark.skip_for_nginx_plus' for Nginx Plus runs.
    Skip tests marked with '@pytest.mark.appprotect' for non-AP images.
    Skip tests marked with '@pytest.mark.batch_start' unless --batch-start is True.
:param config: pytest config
:param items: pytest collected test-items
:return:
"""
if config.getoption("--ic-type") == "nginx-ingress":
skip_for_nginx_oss = pytest.mark.skip(reason="Skip a test for Nginx OSS")
for item in items:
if "skip_for_nginx_oss" in item.keywords:
item.add_marker(skip_for_nginx_oss)
if config.getoption("--ic-type") == "nginx-plus-ingress":
skip_for_nginx_plus = pytest.mark.skip(reason="Skip a test for Nginx Plus")
for item in items:
if "skip_for_nginx_plus" in item.keywords:
item.add_marker(skip_for_nginx_plus)
if "-ap" not in config.getoption("--image"):
appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
for item in items:
if "appprotect" in item.keywords:
item.add_marker(appprotect)
if str(config.getoption("--batch-start")) != "True":
batch_start = pytest.mark.skip(reason="Skipping pod restart test with multiple resources")
for item in items:
if "batch_start" in item.keywords:
item.add_marker(batch_start)
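# Illustration only (hypothetical test, not part of this suite): a test opts
# into the skipping logic above simply by carrying the matching marker, e.g.
#   @pytest.mark.skip_for_nginx_oss
#   def test_plus_only_feature(kube_apis):
#       ...
# which is then skipped automatically when --ic-type=nginx-ingress.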
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item) -> None:
"""
Print out IC Pod logs on test failure.
Only look at actual failing test calls, not setup/teardown.
Only show the logs if commandline argument `--show-ic-logs` is set to 'yes'
:param item:
:return:
"""
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# we only look at actual failing test calls, not setup/teardown
if (
rep.when == "call"
and rep.failed
and item.config.getoption("--show-ic-logs") == "yes"
):
pod_namespace = item.funcargs["ingress_controller_prerequisites"].namespace
pod_name = get_first_pod_name(item.funcargs["kube_apis"].v1, pod_namespace)
print("\n===================== IC Logs Start =====================")
print(
item.funcargs["kube_apis"].v1.read_namespaced_pod_log(
pod_name, pod_namespace
)
)
print("\n===================== IC Logs End =====================")
| 32.091463
| 123
| 0.620179
|
import os
import pytest
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from settings import (
DEFAULT_IMAGE,
DEFAULT_PULL_POLICY,
DEFAULT_IC_TYPE,
DEFAULT_SERVICE,
DEFAULT_DEPLOYMENT_TYPE,
NUM_REPLICAS,
BATCH_START,
BATCH_RESOURCES,
)
from suite.resources_utils import get_first_pod_name
def pytest_addoption(parser) -> None:
parser.addoption(
"--context",
action="store",
default="",
help="The context to use in the kubeconfig file.",
)
parser.addoption(
"--image",
action="store",
default=DEFAULT_IMAGE,
help="The Ingress Controller image.",
)
parser.addoption(
"--image-pull-policy",
action="store",
default=DEFAULT_PULL_POLICY,
help="The pull policy of the Ingress Controller image.",
)
parser.addoption(
"--deployment-type",
action="store",
default=DEFAULT_DEPLOYMENT_TYPE,
help="The type of the IC deployment: deployment or daemon-set.",
)
parser.addoption(
"--ic-type",
action="store",
default=DEFAULT_IC_TYPE,
help="The type of the Ingress Controller: nginx-ingress or nginx-ingress-plus.",
)
parser.addoption(
"--service",
action="store",
default=DEFAULT_SERVICE,
help="The type of the Ingress Controller service: nodeport or loadbalancer.",
)
parser.addoption(
"--replicas",
action="store",
default=NUM_REPLICAS,
help="Number of replica pods for type deployment",
)
parser.addoption(
"--node-ip",
action="store",
help="The public IP of a cluster node. Not required if you use the loadbalancer service (see --service argument).",
)
parser.addoption(
"--kubeconfig",
action="store",
default=os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION),
help="An absolute path to a kubeconfig file.",
)
parser.addoption(
"--show-ic-logs",
action="store",
default="no",
help="Show IC logs in stdout on test failure",
)
parser.addoption(
"--batch-start",
action="store",
        default=BATCH_START,
        help="Run tests for pod restarts with multiple resources deployed (Ingress/VS): True/False",
)
parser.addoption(
"--batch-resources",
action="store",
default=BATCH_RESOURCES,
help="Number of VS/Ingress resources to deploy",
)
pytest_plugins = ["suite.fixtures"]
def pytest_collection_modifyitems(config, items) -> None:
if config.getoption("--ic-type") == "nginx-ingress":
skip_for_nginx_oss = pytest.mark.skip(reason="Skip a test for Nginx OSS")
for item in items:
if "skip_for_nginx_oss" in item.keywords:
item.add_marker(skip_for_nginx_oss)
if config.getoption("--ic-type") == "nginx-plus-ingress":
skip_for_nginx_plus = pytest.mark.skip(reason="Skip a test for Nginx Plus")
for item in items:
if "skip_for_nginx_plus" in item.keywords:
item.add_marker(skip_for_nginx_plus)
if "-ap" not in config.getoption("--image"):
appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
for item in items:
if "appprotect" in item.keywords:
item.add_marker(appprotect)
if str(config.getoption("--batch-start")) != "True":
batch_start = pytest.mark.skip(reason="Skipping pod restart test with multiple resources")
for item in items:
if "batch_start" in item.keywords:
item.add_marker(batch_start)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item) -> None:
outcome = yield
rep = outcome.get_result()
if (
rep.when == "call"
and rep.failed
and item.config.getoption("--show-ic-logs") == "yes"
):
pod_namespace = item.funcargs["ingress_controller_prerequisites"].namespace
pod_name = get_first_pod_name(item.funcargs["kube_apis"].v1, pod_namespace)
print("\n===================== IC Logs Start =====================")
print(
item.funcargs["kube_apis"].v1.read_namespaced_pod_log(
pod_name, pod_namespace
)
)
print("\n===================== IC Logs End =====================")
| true
| true
|
7906df49e715c44b404cb978bc5b56dd63aa41ad
| 28,718
|
py
|
Python
|
src/tasks/lm.py
|
skysky77/MGNMT
|
19dded399a310cd118eee09bd37d657746d11cf1
|
[
"MIT"
] | 9
|
2021-01-11T05:49:29.000Z
|
2021-12-20T21:13:38.000Z
|
src/tasks/lm.py
|
skysky77/MGNMT
|
19dded399a310cd118eee09bd37d657746d11cf1
|
[
"MIT"
] | 1
|
2021-01-28T03:27:09.000Z
|
2021-02-19T05:58:56.000Z
|
src/tasks/lm.py
|
skysky77/MGNMT
|
19dded399a310cd118eee09bd37d657746d11cf1
|
[
"MIT"
] | 5
|
2021-01-11T05:49:39.000Z
|
2021-09-27T03:06:45.000Z
|
# MIT License
# Copyright (c) 2018 the NJUNMT-pytorch authors.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import random
import time
from copy import deepcopy
import numpy as np
import torch
import yaml
from tensorboardX import SummaryWriter
from tqdm import tqdm
from src.data.data_iterator import DataIterator
from src.data.dataset import TextLineDataset, ZipDataset
from src.data.vocabulary import Vocabulary
from src.decoding import beam_search, ensemble_beam_search
from src.decoding.beam_search import nmt_lm_fusion_beam_search
from src.metric.bleu_scorer import SacreBLEUScorer
from src.models import build_model
from src.modules.criterions import NMTCriterion
from src.optim import Optimizer
from src.optim.lr_scheduler import ReduceOnPlateauScheduler, NoamScheduler, RsqrtScheduler
from src.utils.common_utils import *
from src.utils.configs import default_configs, pretty_configs
from src.utils.logging import *
from src.utils.moving_average import MovingAverage
BOS = Vocabulary.BOS
EOS = Vocabulary.EOS
PAD = Vocabulary.PAD
def set_seed(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def load_model_parameters(path, map_location="cpu"):
state_dict = torch.load(path, map_location=map_location)
if "model" in state_dict:
return state_dict["model"]
return state_dict
def split_shard(*inputs, split_size=1):
if split_size <= 1:
yield inputs
else:
        lengths = [len(s) for s in inputs[-1]]
sorted_indices = np.argsort(lengths)
# sorting inputs
inputs = [
[inp[ii] for ii in sorted_indices]
for inp in inputs
]
# split shards
total_batch = sorted_indices.shape[0] # total number of batches
if split_size >= total_batch:
yield inputs
else:
shard_size = total_batch // split_size
_indices = list(range(total_batch))[::shard_size] + [total_batch]
for beg, end in zip(_indices[:-1], _indices[1:]):
yield (inp[beg:end] for inp in inputs)
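# Illustration only: with split_size=3 and a batch of 6 sentences, split_shard
# sorts the batch by length and yields 3 shards of 2 sentences each, so one
# large batch can be accumulated over several backward passes (see the
# update_cycle loop in train() below); with split_size<=1 the inputs are
# yielded unchanged.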
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
    """
    Args:
        seqs_x ('list'): source sequences of token ids.
        seqs_y ('list' or None): target sequences; if None, only x is returned.
        cuda ('bool'): move the padded tensor(s) to GPU.
        batch_first ('bool'): shape tensors as [batch, time] instead of [time, batch].
    Returns:
        Padded LongTensor x, or the pair (x, y), with BOS/EOS added and PAD as filler.
    """
def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True):
batch_size = len(samples)
sizes = [len(s) for s in samples]
max_size = max(sizes)
x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64')
for ii in range(batch_size):
x_np[ii, :sizes[ii]] = samples[ii]
if batch_first is False:
x_np = np.transpose(x_np, [1, 0])
x = torch.tensor(x_np)
if cuda is True:
x = x.cuda()
return x
seqs_x = list(map(lambda s: [BOS] + s + [EOS], seqs_x))
x = _np_pad_batch_2D(samples=seqs_x, pad=PAD,
cuda=cuda, batch_first=batch_first)
if seqs_y is None:
return x
seqs_y = list(map(lambda s: [BOS] + s + [EOS], seqs_y))
y = _np_pad_batch_2D(seqs_y, pad=PAD,
cuda=cuda, batch_first=batch_first)
return x, y
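# Illustration only (token ids are made up): with BOS=2, EOS=3, PAD=0 and
# seqs_x=[[5, 6], [7]], prepare_data wraps each sentence in BOS/EOS and
# right-pads to the longest one, giving a (2, 4) LongTensor
#   [[2, 5, 6, 3],
#    [2, 7, 3, 0]]
# when batch_first=True; passing seqs_y as well returns the padded (x, y) pair.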
def compute_forward(model,
critic,
seqs_x,
eval=False,
normalization=1.0,
norm_by_words=False
):
"""
:type model: nn.Module
:type critic: NMTCriterion
"""
x_inp = seqs_x[:, :-1].contiguous()
x_label = seqs_x[:, 1:].contiguous()
words_norm = x_label.ne(PAD).float().sum(1)
if not eval:
model.train()
critic.train()
# For training
with torch.enable_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, reduce=False,
normalization=normalization)
if norm_by_words:
loss = loss.div(words_norm).sum()
else:
loss = loss.sum()
torch.autograd.backward(loss)
return loss.item()
else:
model.eval()
critic.eval()
# For compute loss
with torch.no_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, normalization=normalization, reduce=True)
return loss.item()
def loss_validation(model, critic, valid_iterator):
"""
:type model: Transformer
:type critic: NMTCriterion
:type valid_iterator: DataIterator
"""
n_sents = 0
n_tokens = 0.0
sum_loss = 0.0
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
_, seqs_x = batch
n_sents += len(seqs_x)
n_tokens += sum(len(s) for s in seqs_x)
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=model,
critic=critic,
seqs_x=x,
eval=True)
if np.isnan(loss):
WARN("NaN detected!")
sum_loss += float(loss)
return float(sum_loss / n_sents)
def bleu_validation(uidx,
valid_iterator,
model,
bleu_scorer,
vocab_tgt,
batch_size,
valid_dir="./valid",
max_steps=10,
beam_size=5,
alpha=-1.0
):
model.eval()
numbers = []
trans = []
infer_progress_bar = tqdm(total=len(valid_iterator),
desc=' - (Infer) ',
unit="sents")
valid_iter = valid_iterator.build_generator(batch_size=batch_size)
for batch in valid_iter:
seq_nums = batch[0]
numbers += seq_nums
seqs_x = batch[1]
infer_progress_bar.update(len(seqs_x))
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
with torch.no_grad():
word_ids = beam_search(nmt_model=model,
beam_size=beam_size,
max_steps=max_steps,
src_seqs=x, alpha=alpha)
word_ids = word_ids.cpu().numpy().tolist()
# Append result
for sent_t in word_ids:
sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
x_tokens = []
for wid in sent_t[0]:
if wid == EOS:
break
x_tokens.append(vocab_tgt.id2token(wid))
if len(x_tokens) > 0:
trans.append(vocab_tgt.tokenizer.detokenize(x_tokens))
else:
trans.append('%s' % vocab_tgt.id2token(EOS))
origin_order = np.argsort(numbers).tolist()
trans = [trans[ii] for ii in origin_order]
infer_progress_bar.close()
if not os.path.exists(valid_dir):
os.mkdir(valid_dir)
hyp_path = os.path.join(valid_dir, 'trans.iter{0}.txt'.format(uidx))
with open(hyp_path, 'w') as f:
for line in trans:
f.write('%s\n' % line)
with open(hyp_path) as f:
bleu_v = bleu_scorer.corpus_bleu(f)
return bleu_v
def load_pretrained_model(nmt_model, pretrain_path, device, exclude_prefix=None):
    """
    Args:
        nmt_model: model.
        pretrain_path ('str'): path to pretrained model.
        device ('str'): map_location used when loading the checkpoint.
        exclude_prefix ('list'): prefixes of parameter names that should be
            excluded from loading.
    Raises:
        ValueError: Size not match, parameter name not match or others.
    """
if exclude_prefix is None:
exclude_prefix = []
if pretrain_path != "":
INFO("Loading pretrained model from {}".format(pretrain_path))
pretrain_params = torch.load(pretrain_path, map_location=device)
for name, params in pretrain_params.items():
flag = False
for pp in exclude_prefix:
if name.startswith(pp):
flag = True
break
if flag:
continue
INFO("Loading param: {}...".format(name))
try:
nmt_model.load_state_dict({name: params}, strict=False)
except Exception as e:
WARN("{}: {}".format(str(Exception), e))
INFO("Pretrained model loaded.")
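# Usage sketch (path and prefix are hypothetical): warm-start a model while
# skipping parameters whose names start with a given prefix, e.g.
#   load_pretrained_model(nmt_model, "/path/to/pretrain.ckpt", device="cpu",
#                         exclude_prefix=["generator."])
# train() below calls this with FLAGS.pretrain_path and exclude_prefix=None.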
def train(FLAGS):
"""
FLAGS:
saveto: str
reload: store_true
config_path: str
pretrain_path: str, default=""
model_name: str
log_path: str
"""
# write log of training to file.
write_log_to_file(os.path.join(FLAGS.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S")))
GlobalNames.USE_GPU = FLAGS.use_gpu
    if GlobalNames.USE_GPU:
        CURRENT_DEVICE = "cuda:0"
    else:
        CURRENT_DEVICE = "cpu"
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
INFO(pretty_configs(configs))
# Add default configs
configs = default_configs(configs)
data_configs = configs['data_configs']
model_configs = configs['model_configs']
optimizer_configs = configs['optimizer_configs']
training_configs = configs['training_configs']
GlobalNames.SEED = training_configs['seed']
set_seed(GlobalNames.SEED)
best_model_prefix = os.path.join(FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX)
timer = Timer()
# ================================================================================== #
# Load Data
INFO('Loading data...')
timer.tic()
# Generate target dictionary
vocab_src = Vocabulary(**data_configs["vocabularies"][0])
train_batch_size = training_configs["batch_size"] * max(1, training_configs["update_cycle"])
train_buffer_size = training_configs["buffer_size"] * max(1, training_configs["update_cycle"])
train_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['train_data'][0],
vocabulary=vocab_src,
max_len=data_configs['max_len'][0],
),
shuffle=training_configs['shuffle']
)
valid_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['valid_data'][0],
vocabulary=vocab_src,
),
)
training_iterator = DataIterator(dataset=train_bitext_dataset,
batch_size=train_batch_size,
use_bucket=training_configs['use_bucket'],
buffer_size=train_buffer_size,
batching_func=training_configs['batching_key'])
valid_iterator = DataIterator(dataset=valid_bitext_dataset,
batch_size=training_configs['valid_batch_size'],
use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
lrate = optimizer_configs['learning_rate']
is_early_stop = False
# ================================ Begin ======================================== #
# Build Model & Optimizer
# We would do steps below on after another
# 1. build models & criterion
# 2. move models & criterion to gpu if needed
# 3. load pre-trained model if needed
# 4. build optimizer
# 5. build learning rate scheduler if needed
# 6. load checkpoints if needed
# 0. Initial
model_collections = Collections()
checkpoint_saver = Saver(save_prefix="{0}.ckpt".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
num_max_keeping=training_configs['num_kept_checkpoints']
)
best_model_saver = BestKSaver(save_prefix="{0}.best".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
num_max_keeping=training_configs["num_kept_best_checkpoints"])
# 1. Build Model & Criterion
INFO('Building model...')
timer.tic()
nmt_model = build_model(n_words=vocab_src.max_n_words, **model_configs)
INFO(nmt_model)
params_total = sum([p.numel() for n, p in nmt_model.named_parameters()])
params_with_embedding = sum([p.numel() for n, p in nmt_model.named_parameters() if n.find('embedding') == -1])
INFO('Total parameters: {}'.format(params_total))
INFO('Total parameters (excluding word embeddings): {}'.format(params_with_embedding))
critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'])
INFO(critic)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# 2. Move to GPU
if GlobalNames.USE_GPU:
nmt_model = nmt_model.cuda()
critic = critic.cuda()
# 3. Load pretrained model if needed
load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE)
# 4. Build optimizer
INFO('Building Optimizer...')
optim = Optimizer(name=optimizer_configs['optimizer'],
model=nmt_model,
lr=lrate,
grad_clip=optimizer_configs['grad_clip'],
optim_args=optimizer_configs['optimizer_params']
)
# 5. Build scheduler for optimizer if needed
if optimizer_configs['schedule_method'] is not None:
if optimizer_configs['schedule_method'] == "loss":
scheduler = ReduceOnPlateauScheduler(optimizer=optim,
**optimizer_configs["scheduler_configs"]
)
elif optimizer_configs['schedule_method'] == "noam":
scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
elif optimizer_configs["schedule_method"] == "rsqrt":
scheduler = RsqrtScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
else:
WARN("Unknown scheduler name {0}. Do not use lr_scheduling.".format(optimizer_configs['schedule_method']))
scheduler = None
else:
scheduler = None
# 6. build moving average
if training_configs['moving_average_method'] is not None:
ma = MovingAverage(moving_average_method=training_configs['moving_average_method'],
named_params=nmt_model.named_parameters(),
alpha=training_configs['moving_average_alpha'])
else:
ma = None
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# Reload from latest checkpoint
if FLAGS.reload:
checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler,
collections=model_collections, ma=ma)
# ================================================================================== #
# Prepare training
eidx = model_collections.get_collection("eidx", [0])[-1]
uidx = model_collections.get_collection("uidx", [0])[-1]
bad_count = model_collections.get_collection("bad_count", [0])[-1]
oom_count = model_collections.get_collection("oom_count", [0])[-1]
summary_writer = SummaryWriter(log_dir=FLAGS.log_path)
cum_samples = 0
cum_words = 0
valid_loss = best_valid_loss = float('inf') # Max Float
saving_files = []
# Timer for computing speed
timer_for_speed = Timer()
timer_for_speed.tic()
INFO('Begin training...')
while True:
summary_writer.add_scalar("Epoch", (eidx + 1), uidx)
# Build iterator and progress bar
training_iter = training_iterator.build_generator()
training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format(eidx, uidx),
total=len(training_iterator),
unit="sents"
)
for batch in training_iter:
uidx += 1
if optimizer_configs["schedule_method"] is not None and optimizer_configs["schedule_method"] != "loss":
scheduler.step(global_step=uidx)
seqs_x = batch
n_samples_t = len(seqs_x)
n_words_t = sum(len(s) for s in seqs_x)
cum_samples += n_samples_t
cum_words += n_words_t
train_loss = 0.
optim.zero_grad()
try:
# Prepare data
for seqs_x_t, in split_shard(seqs_x, split_size=training_configs['update_cycle']):
x = prepare_data(seqs_x_t, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=nmt_model,
critic=critic,
seqs_x=x,
eval=False,
normalization=n_samples_t,
norm_by_words=training_configs["norm_by_words"])
train_loss += loss / x.size(1)
optim.step()
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
oom_count += 1
optim.zero_grad()
else:
raise e
if ma is not None and eidx >= training_configs['moving_average_start_epoch']:
ma.step()
training_progress_bar.update(n_samples_t)
training_progress_bar.set_description(' - (Epc {}, Upd {}) '.format(eidx, uidx))
training_progress_bar.set_postfix_str(
'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f})'.format(train_loss, valid_loss, best_valid_loss))
summary_writer.add_scalar("train_loss", scalar_value=train_loss, global_step=uidx)
# ================================================================================== #
# Display some information
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']):
# words per second and sents per second
words_per_sec = cum_words / (timer.toc(return_seconds=True))
sents_per_sec = cum_samples / (timer.toc(return_seconds=True))
lrate = list(optim.get_lrate())[0]
summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx)
summary_writer.add_scalar("Speed(sents/sen)", scalar_value=sents_per_sec, global_step=uidx)
summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx)
summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx)
# Reset timer
timer.tic()
cum_words = 0
cum_samples = 0
# ================================================================================== #
# Loss Validation & Learning rate annealing
if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'],
debug=FLAGS.debug):
if ma is not None:
origin_state_dict = deepcopy(nmt_model.state_dict())
nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
valid_loss = loss_validation(model=nmt_model,
critic=critic,
valid_iterator=valid_iterator,
)
model_collections.add_to_collection("history_losses", valid_loss)
min_history_loss = np.array(model_collections.get_collection("history_losses")).min()
summary_writer.add_scalar("loss", valid_loss, global_step=uidx)
summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx)
best_valid_loss = min_history_loss
if ma is not None:
nmt_model.load_state_dict(origin_state_dict)
del origin_state_dict
if optimizer_configs["schedule_method"] == "loss":
scheduler.step(global_step=uidx, metric=best_valid_loss)
                # If the model reaches a new best validation loss
                if valid_loss <= best_valid_loss:
bad_count = 0
if is_early_stop is False:
# 1. save the best model's parameters
torch.save(nmt_model.state_dict(), best_model_prefix + ".final")
# 2. save the best checkpoint
model_collections.add_to_collection("uidx", uidx)
model_collections.add_to_collection("eidx", eidx)
model_collections.add_to_collection("bad_count", bad_count)
best_model_saver.save(global_step=uidx, metric=valid_loss,
model=nmt_model,
optim=optim,
lr_scheduler=scheduler,
collections=model_collections,
ma=ma)
else:
bad_count += 1
# At least one epoch should be traversed
if bad_count >= training_configs['early_stop_patience'] and eidx > 0:
is_early_stop = True
WARN("Early Stop!")
summary_writer.add_scalar("bad_count", bad_count, uidx)
INFO("{0} Loss: {1:.2f} lrate: {2:6f} patience: {3}".format(
uidx, valid_loss, lrate, bad_count
))
# ================================================================================== #
# Saving checkpoints
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug):
model_collections.add_to_collection("uidx", uidx)
model_collections.add_to_collection("eidx", eidx)
model_collections.add_to_collection("bad_count", bad_count)
if not is_early_stop:
checkpoint_saver.save(global_step=uidx,
model=nmt_model,
optim=optim,
lr_scheduler=scheduler,
collections=model_collections,
ma=ma)
training_progress_bar.close()
eidx += 1
if eidx > training_configs["max_epochs"]:
break
def nmt_lm_fusion_translate(FLAGS):
GlobalNames.USE_GPU = FLAGS.use_gpu
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
data_configs = configs['data_configs']
nmt_model_configs = configs['nmt_model_configs']
lm_model_configs = configs['lm_model_configs']
timer = Timer()
# ================================================================================== #
# Load Data
INFO('Loading data...')
timer.tic()
# Generate target dictionary
vocab_src = Vocabulary(**data_configs["vocabularies"][0])
vocab_tgt = Vocabulary(**data_configs["vocabularies"][1])
valid_dataset = TextLineDataset(data_path=FLAGS.source_path,
vocabulary=vocab_src)
valid_iterator = DataIterator(dataset=valid_dataset,
batch_size=FLAGS.batch_size,
use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# ================================================================================== #
# Build Model & Sampler & Validation
INFO('Building model...')
timer.tic()
nmt_model_path = FLAGS.nmt_model_path
lm_model_path = FLAGS.lm_model_path
nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
n_tgt_vocab=vocab_tgt.max_n_words, **nmt_model_configs)
lm_model = build_model(n_words=vocab_tgt.max_n_words, **lm_model_configs)
nmt_model.eval()
lm_model.eval()
INFO('Done. Elapsed time {0}'.format(timer.toc()))
INFO('Reloading model parameters...')
timer.tic()
nmt_params = load_model_parameters(nmt_model_path, map_location="cpu")
lm_params = load_model_parameters(lm_model_path, map_location="cpu")
nmt_model.load_state_dict(nmt_params)
lm_model.load_state_dict(lm_params)
if GlobalNames.USE_GPU:
nmt_model.cuda()
lm_model.cuda()
INFO('Done. Elapsed time {0}'.format(timer.toc()))
INFO('Begin...')
result_numbers = []
result = []
n_words = 0
timer.tic()
infer_progress_bar = tqdm(total=len(valid_iterator),
desc=' - (Infer) ',
unit="sents")
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
numbers, seqs_x = batch
batch_size_t = len(seqs_x)
x = prepare_data(seqs_x=seqs_x, cuda=GlobalNames.USE_GPU)
with torch.no_grad():
word_ids = nmt_lm_fusion_beam_search(nmt_model=nmt_model, lm_model=lm_model,
beam_size=FLAGS.beam_size,
max_steps=FLAGS.max_steps,
src_seqs=x,
alpha=FLAGS.alpha,
beta=FLAGS.beta)
word_ids = word_ids.cpu().numpy().tolist()
result_numbers += numbers
# Append result
for sent_t in word_ids:
sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
result.append(sent_t)
n_words += len(sent_t[0])
infer_progress_bar.update(batch_size_t)
infer_progress_bar.close()
INFO('Done. Speed: {0:.2f} words/sec'.format(n_words / (timer.toc(return_seconds=True))))
translation = []
for sent in result:
samples = []
for trans in sent:
sample = []
for w in trans:
if w == vocab_tgt.EOS:
break
sample.append(vocab_tgt.id2token(w))
samples.append(vocab_tgt.tokenizer.detokenize(sample))
translation.append(samples)
# resume the ordering
origin_order = np.argsort(result_numbers).tolist()
translation = [translation[ii] for ii in origin_order]
with open(FLAGS.saveto, 'w') as f:
for trans in translation:
f.write("%s\n"%trans[0])
if __name__ == '__main__':
_args = {
"model_name": "test_rnnlm",
"reload": False,
"config_path": "./configs/test_rnnlm.yaml",
"debug": True,
"use_gpu": False,
"task": "lm",
"log_path": "/tmp",
"saveto": "/tmp",
"valid_path": "/tmp",
}
from src.bin import train as _train
_train.run(**_args)
| 34.516827
| 120
| 0.566683
|
import os
import random
import time
from copy import deepcopy
import numpy as np
import torch
import yaml
from tensorboardX import SummaryWriter
from tqdm import tqdm
from src.data.data_iterator import DataIterator
from src.data.dataset import TextLineDataset, ZipDataset
from src.data.vocabulary import Vocabulary
from src.decoding import beam_search, ensemble_beam_search
from src.decoding.beam_search import nmt_lm_fusion_beam_search
from src.metric.bleu_scorer import SacreBLEUScorer
from src.models import build_model
from src.modules.criterions import NMTCriterion
from src.optim import Optimizer
from src.optim.lr_scheduler import ReduceOnPlateauScheduler, NoamScheduler, RsqrtScheduler
from src.utils.common_utils import *
from src.utils.configs import default_configs, pretty_configs
from src.utils.logging import *
from src.utils.moving_average import MovingAverage
BOS = Vocabulary.BOS
EOS = Vocabulary.EOS
PAD = Vocabulary.PAD
def set_seed(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def load_model_parameters(path, map_location="cpu"):
state_dict = torch.load(path, map_location=map_location)
if "model" in state_dict:
return state_dict["model"]
return state_dict
def split_shard(*inputs, split_size=1):
if split_size <= 1:
yield inputs
else:
lengths = [len(s) for s in inputs[-1]]
sorted_indices = np.argsort(lengths)
inputs = [
[inp[ii] for ii in sorted_indices]
for inp in inputs
]
total_batch = sorted_indices.shape[0]
if split_size >= total_batch:
yield inputs
else:
shard_size = total_batch // split_size
_indices = list(range(total_batch))[::shard_size] + [total_batch]
for beg, end in zip(_indices[:-1], _indices[1:]):
yield (inp[beg:end] for inp in inputs)
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True):
batch_size = len(samples)
sizes = [len(s) for s in samples]
max_size = max(sizes)
x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64')
for ii in range(batch_size):
x_np[ii, :sizes[ii]] = samples[ii]
if batch_first is False:
x_np = np.transpose(x_np, [1, 0])
x = torch.tensor(x_np)
if cuda is True:
x = x.cuda()
return x
seqs_x = list(map(lambda s: [BOS] + s + [EOS], seqs_x))
x = _np_pad_batch_2D(samples=seqs_x, pad=PAD,
cuda=cuda, batch_first=batch_first)
if seqs_y is None:
return x
seqs_y = list(map(lambda s: [BOS] + s + [EOS], seqs_y))
y = _np_pad_batch_2D(seqs_y, pad=PAD,
cuda=cuda, batch_first=batch_first)
return x, y
def compute_forward(model,
critic,
seqs_x,
eval=False,
normalization=1.0,
norm_by_words=False
):
x_inp = seqs_x[:, :-1].contiguous()
x_label = seqs_x[:, 1:].contiguous()
words_norm = x_label.ne(PAD).float().sum(1)
if not eval:
model.train()
critic.train()
with torch.enable_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, reduce=False,
normalization=normalization)
if norm_by_words:
loss = loss.div(words_norm).sum()
else:
loss = loss.sum()
torch.autograd.backward(loss)
return loss.item()
else:
model.eval()
critic.eval()
with torch.no_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, normalization=normalization, reduce=True)
return loss.item()
def loss_validation(model, critic, valid_iterator):
n_sents = 0
n_tokens = 0.0
sum_loss = 0.0
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
_, seqs_x = batch
n_sents += len(seqs_x)
n_tokens += sum(len(s) for s in seqs_x)
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=model,
critic=critic,
seqs_x=x,
eval=True)
if np.isnan(loss):
WARN("NaN detected!")
sum_loss += float(loss)
return float(sum_loss / n_sents)
def bleu_validation(uidx,
valid_iterator,
model,
bleu_scorer,
vocab_tgt,
batch_size,
valid_dir="./valid",
max_steps=10,
beam_size=5,
alpha=-1.0
):
model.eval()
numbers = []
trans = []
infer_progress_bar = tqdm(total=len(valid_iterator),
desc=' - (Infer) ',
unit="sents")
valid_iter = valid_iterator.build_generator(batch_size=batch_size)
for batch in valid_iter:
seq_nums = batch[0]
numbers += seq_nums
seqs_x = batch[1]
infer_progress_bar.update(len(seqs_x))
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
with torch.no_grad():
word_ids = beam_search(nmt_model=model,
beam_size=beam_size,
max_steps=max_steps,
src_seqs=x, alpha=alpha)
word_ids = word_ids.cpu().numpy().tolist()
for sent_t in word_ids:
sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
x_tokens = []
for wid in sent_t[0]:
if wid == EOS:
break
x_tokens.append(vocab_tgt.id2token(wid))
if len(x_tokens) > 0:
trans.append(vocab_tgt.tokenizer.detokenize(x_tokens))
else:
trans.append('%s' % vocab_tgt.id2token(EOS))
origin_order = np.argsort(numbers).tolist()
trans = [trans[ii] for ii in origin_order]
infer_progress_bar.close()
if not os.path.exists(valid_dir):
os.mkdir(valid_dir)
hyp_path = os.path.join(valid_dir, 'trans.iter{0}.txt'.format(uidx))
with open(hyp_path, 'w') as f:
for line in trans:
f.write('%s\n' % line)
with open(hyp_path) as f:
bleu_v = bleu_scorer.corpus_bleu(f)
return bleu_v
def load_pretrained_model(nmt_model, pretrain_path, device, exclude_prefix=None):
if exclude_prefix is None:
exclude_prefix = []
if pretrain_path != "":
INFO("Loading pretrained model from {}".format(pretrain_path))
pretrain_params = torch.load(pretrain_path, map_location=device)
for name, params in pretrain_params.items():
flag = False
for pp in exclude_prefix:
if name.startswith(pp):
flag = True
break
if flag:
continue
INFO("Loading param: {}...".format(name))
try:
nmt_model.load_state_dict({name: params}, strict=False)
except Exception as e:
WARN("{}: {}".format(str(Exception), e))
INFO("Pretrained model loaded.")
def train(FLAGS):
write_log_to_file(os.path.join(FLAGS.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S")))
GlobalNames.USE_GPU = FLAGS.use_gpu
    if GlobalNames.USE_GPU:
        CURRENT_DEVICE = "cuda:0"
    else:
        CURRENT_DEVICE = "cpu"
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
INFO(pretty_configs(configs))
configs = default_configs(configs)
data_configs = configs['data_configs']
model_configs = configs['model_configs']
optimizer_configs = configs['optimizer_configs']
training_configs = configs['training_configs']
GlobalNames.SEED = training_configs['seed']
set_seed(GlobalNames.SEED)
best_model_prefix = os.path.join(FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX)
timer = Timer()
INFO('Loading data...')
timer.tic()
vocab_src = Vocabulary(**data_configs["vocabularies"][0])
train_batch_size = training_configs["batch_size"] * max(1, training_configs["update_cycle"])
train_buffer_size = training_configs["buffer_size"] * max(1, training_configs["update_cycle"])
train_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['train_data'][0],
vocabulary=vocab_src,
max_len=data_configs['max_len'][0],
),
shuffle=training_configs['shuffle']
)
valid_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['valid_data'][0],
vocabulary=vocab_src,
),
)
training_iterator = DataIterator(dataset=train_bitext_dataset,
batch_size=train_batch_size,
use_bucket=training_configs['use_bucket'],
buffer_size=train_buffer_size,
batching_func=training_configs['batching_key'])
valid_iterator = DataIterator(dataset=valid_bitext_dataset,
batch_size=training_configs['valid_batch_size'],
use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
lrate = optimizer_configs['learning_rate']
is_early_stop = False
model_collections = Collections()
checkpoint_saver = Saver(save_prefix="{0}.ckpt".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
num_max_keeping=training_configs['num_kept_checkpoints']
)
best_model_saver = BestKSaver(save_prefix="{0}.best".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
num_max_keeping=training_configs["num_kept_best_checkpoints"])
INFO('Building model...')
timer.tic()
nmt_model = build_model(n_words=vocab_src.max_n_words, **model_configs)
INFO(nmt_model)
params_total = sum([p.numel() for n, p in nmt_model.named_parameters()])
params_with_embedding = sum([p.numel() for n, p in nmt_model.named_parameters() if n.find('embedding') == -1])
INFO('Total parameters: {}'.format(params_total))
INFO('Total parameters (excluding word embeddings): {}'.format(params_with_embedding))
critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'])
INFO(critic)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
if GlobalNames.USE_GPU:
nmt_model = nmt_model.cuda()
critic = critic.cuda()
load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE)
INFO('Building Optimizer...')
optim = Optimizer(name=optimizer_configs['optimizer'],
model=nmt_model,
lr=lrate,
grad_clip=optimizer_configs['grad_clip'],
optim_args=optimizer_configs['optimizer_params']
)
if optimizer_configs['schedule_method'] is not None:
if optimizer_configs['schedule_method'] == "loss":
scheduler = ReduceOnPlateauScheduler(optimizer=optim,
**optimizer_configs["scheduler_configs"]
)
elif optimizer_configs['schedule_method'] == "noam":
scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
elif optimizer_configs["schedule_method"] == "rsqrt":
scheduler = RsqrtScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
else:
WARN("Unknown scheduler name {0}. Do not use lr_scheduling.".format(optimizer_configs['schedule_method']))
scheduler = None
else:
scheduler = None
if training_configs['moving_average_method'] is not None:
ma = MovingAverage(moving_average_method=training_configs['moving_average_method'],
named_params=nmt_model.named_parameters(),
alpha=training_configs['moving_average_alpha'])
else:
ma = None
INFO('Done. Elapsed time {0}'.format(timer.toc()))
if FLAGS.reload:
checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler,
collections=model_collections, ma=ma)
eidx = model_collections.get_collection("eidx", [0])[-1]
uidx = model_collections.get_collection("uidx", [0])[-1]
bad_count = model_collections.get_collection("bad_count", [0])[-1]
oom_count = model_collections.get_collection("oom_count", [0])[-1]
summary_writer = SummaryWriter(log_dir=FLAGS.log_path)
cum_samples = 0
cum_words = 0
valid_loss = best_valid_loss = float('inf')
saving_files = []
timer_for_speed = Timer()
timer_for_speed.tic()
INFO('Begin training...')
while True:
summary_writer.add_scalar("Epoch", (eidx + 1), uidx)
training_iter = training_iterator.build_generator()
training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format(eidx, uidx),
total=len(training_iterator),
unit="sents"
)
for batch in training_iter:
uidx += 1
if optimizer_configs["schedule_method"] is not None and optimizer_configs["schedule_method"] != "loss":
scheduler.step(global_step=uidx)
seqs_x = batch
n_samples_t = len(seqs_x)
n_words_t = sum(len(s) for s in seqs_x)
cum_samples += n_samples_t
cum_words += n_words_t
train_loss = 0.
optim.zero_grad()
try:
for seqs_x_t, in split_shard(seqs_x, split_size=training_configs['update_cycle']):
x = prepare_data(seqs_x_t, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=nmt_model,
critic=critic,
seqs_x=x,
eval=False,
normalization=n_samples_t,
norm_by_words=training_configs["norm_by_words"])
train_loss += loss / x.size(1)
optim.step()
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
oom_count += 1
optim.zero_grad()
else:
raise e
if ma is not None and eidx >= training_configs['moving_average_start_epoch']:
ma.step()
training_progress_bar.update(n_samples_t)
training_progress_bar.set_description(' - (Epc {}, Upd {}) '.format(eidx, uidx))
training_progress_bar.set_postfix_str(
'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f})'.format(train_loss, valid_loss, best_valid_loss))
summary_writer.add_scalar("train_loss", scalar_value=train_loss, global_step=uidx)
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']):
words_per_sec = cum_words / (timer.toc(return_seconds=True))
sents_per_sec = cum_samples / (timer.toc(return_seconds=True))
lrate = list(optim.get_lrate())[0]
summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx)
                summary_writer.add_scalar("Speed(sents/sec)", scalar_value=sents_per_sec, global_step=uidx)
summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx)
summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx)
timer.tic()
cum_words = 0
cum_samples = 0
if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'],
debug=FLAGS.debug):
if ma is not None:
origin_state_dict = deepcopy(nmt_model.state_dict())
nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
valid_loss = loss_validation(model=nmt_model,
critic=critic,
valid_iterator=valid_iterator,
)
model_collections.add_to_collection("history_losses", valid_loss)
min_history_loss = np.array(model_collections.get_collection("history_losses")).min()
summary_writer.add_scalar("loss", valid_loss, global_step=uidx)
summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx)
best_valid_loss = min_history_loss
if ma is not None:
nmt_model.load_state_dict(origin_state_dict)
del origin_state_dict
if optimizer_configs["schedule_method"] == "loss":
scheduler.step(global_step=uidx, metric=best_valid_loss)
                if valid_loss <= best_valid_loss:
bad_count = 0
if is_early_stop is False:
torch.save(nmt_model.state_dict(), best_model_prefix + ".final")
# 2. save the best checkpoint
model_collections.add_to_collection("uidx", uidx)
model_collections.add_to_collection("eidx", eidx)
model_collections.add_to_collection("bad_count", bad_count)
best_model_saver.save(global_step=uidx, metric=valid_loss,
model=nmt_model,
optim=optim,
lr_scheduler=scheduler,
collections=model_collections,
ma=ma)
else:
bad_count += 1
# At least one epoch should be traversed
if bad_count >= training_configs['early_stop_patience'] and eidx > 0:
is_early_stop = True
WARN("Early Stop!")
summary_writer.add_scalar("bad_count", bad_count, uidx)
INFO("{0} Loss: {1:.2f} lrate: {2:6f} patience: {3}".format(
uidx, valid_loss, lrate, bad_count
))
# ================================================================================== #
# Saving checkpoints
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug):
model_collections.add_to_collection("uidx", uidx)
model_collections.add_to_collection("eidx", eidx)
model_collections.add_to_collection("bad_count", bad_count)
if not is_early_stop:
checkpoint_saver.save(global_step=uidx,
model=nmt_model,
optim=optim,
lr_scheduler=scheduler,
collections=model_collections,
ma=ma)
training_progress_bar.close()
eidx += 1
if eidx > training_configs["max_epochs"]:
break
def nmt_lm_fusion_translate(FLAGS):
GlobalNames.USE_GPU = FLAGS.use_gpu
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
data_configs = configs['data_configs']
nmt_model_configs = configs['nmt_model_configs']
lm_model_configs = configs['lm_model_configs']
timer = Timer()
# ================================================================================== #
# Load Data
INFO('Loading data...')
timer.tic()
# Generate target dictionary
vocab_src = Vocabulary(**data_configs["vocabularies"][0])
vocab_tgt = Vocabulary(**data_configs["vocabularies"][1])
valid_dataset = TextLineDataset(data_path=FLAGS.source_path,
vocabulary=vocab_src)
valid_iterator = DataIterator(dataset=valid_dataset,
batch_size=FLAGS.batch_size,
use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# ================================================================================== #
# Build Model & Sampler & Validation
INFO('Building model...')
timer.tic()
nmt_model_path = FLAGS.nmt_model_path
lm_model_path = FLAGS.lm_model_path
nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
n_tgt_vocab=vocab_tgt.max_n_words, **nmt_model_configs)
lm_model = build_model(n_words=vocab_tgt.max_n_words, **lm_model_configs)
nmt_model.eval()
lm_model.eval()
INFO('Done. Elapsed time {0}'.format(timer.toc()))
INFO('Reloading model parameters...')
timer.tic()
nmt_params = load_model_parameters(nmt_model_path, map_location="cpu")
lm_params = load_model_parameters(lm_model_path, map_location="cpu")
nmt_model.load_state_dict(nmt_params)
lm_model.load_state_dict(lm_params)
if GlobalNames.USE_GPU:
nmt_model.cuda()
lm_model.cuda()
INFO('Done. Elapsed time {0}'.format(timer.toc()))
INFO('Begin...')
result_numbers = []
result = []
n_words = 0
timer.tic()
infer_progress_bar = tqdm(total=len(valid_iterator),
desc=' - (Infer) ',
unit="sents")
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
numbers, seqs_x = batch
batch_size_t = len(seqs_x)
x = prepare_data(seqs_x=seqs_x, cuda=GlobalNames.USE_GPU)
with torch.no_grad():
word_ids = nmt_lm_fusion_beam_search(nmt_model=nmt_model, lm_model=lm_model,
beam_size=FLAGS.beam_size,
max_steps=FLAGS.max_steps,
src_seqs=x,
alpha=FLAGS.alpha,
beta=FLAGS.beta)
word_ids = word_ids.cpu().numpy().tolist()
result_numbers += numbers
# Append result
for sent_t in word_ids:
sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
result.append(sent_t)
n_words += len(sent_t[0])
infer_progress_bar.update(batch_size_t)
infer_progress_bar.close()
INFO('Done. Speed: {0:.2f} words/sec'.format(n_words / (timer.toc(return_seconds=True))))
translation = []
for sent in result:
samples = []
for trans in sent:
sample = []
for w in trans:
if w == vocab_tgt.EOS:
break
sample.append(vocab_tgt.id2token(w))
samples.append(vocab_tgt.tokenizer.detokenize(sample))
translation.append(samples)
# resume the ordering
origin_order = np.argsort(result_numbers).tolist()
translation = [translation[ii] for ii in origin_order]
with open(FLAGS.saveto, 'w') as f:
for trans in translation:
f.write("%s\n"%trans[0])
if __name__ == '__main__':
_args = {
"model_name": "test_rnnlm",
"reload": False,
"config_path": "./configs/test_rnnlm.yaml",
"debug": True,
"use_gpu": False,
"task": "lm",
"log_path": "/tmp",
"saveto": "/tmp",
"valid_path": "/tmp",
}
from src.bin import train as _train
_train.run(**_args)
| true
| true
|
7906dfc50e5923133cd35b0e678e921240a868fa
| 148
|
py
|
Python
|
ugali/analysis/__init__.py
|
mcnanna/ugali
|
2572915b82af5b25e8762013e6d5baabdaa24b21
|
[
"MIT"
] | 12
|
2016-10-26T20:45:33.000Z
|
2021-11-24T04:07:43.000Z
|
ugali/analysis/__init__.py
|
mcnanna/ugali
|
2572915b82af5b25e8762013e6d5baabdaa24b21
|
[
"MIT"
] | 64
|
2017-04-14T15:04:24.000Z
|
2022-02-03T19:42:57.000Z
|
ugali/analysis/__init__.py
|
kadrlica/ugali
|
dcf53594658a2b577f4da271783b43ed0a79fec9
|
[
"MIT"
] | 12
|
2016-06-23T21:42:46.000Z
|
2021-06-19T05:29:49.000Z
|
"""
This is the UGaLi analysis sub-package.
Classes related to higher-level data analysis live here.
Modules
objects :
mask :
"""
| 13.454545
| 56
| 0.655405
| true
| true
|
|
7906e18247d505a4162fb6278c7ccbb56a76fa50
| 1,593
|
py
|
Python
|
cnn_modules/cnn_gail.py
|
aj96/InfoGAIL
|
a1f929bb47ca05a38c4fe54944204daef851fe90
|
[
"MIT"
] | null | null | null |
cnn_modules/cnn_gail.py
|
aj96/InfoGAIL
|
a1f929bb47ca05a38c4fe54944204daef851fe90
|
[
"MIT"
] | null | null | null |
cnn_modules/cnn_gail.py
|
aj96/InfoGAIL
|
a1f929bb47ca05a38c4fe54944204daef851fe90
|
[
"MIT"
] | null | null | null |
import logging
from typing import Iterable, Mapping, Optional, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common import on_policy_algorithm, vec_env
from imitation.data import types
from imitation.rewards import discrim_nets
from imitation.algorithms.adversarial import AdversarialTrainer
from .cnn_discriminator import ActObsCNN
class CNNGAIL(AdversarialTrainer):
def __init__(
self,
venv: vec_env.VecEnv,
expert_data: Union[Iterable[Mapping], types.Transitions],
expert_batch_size: int,
gen_algo: on_policy_algorithm.OnPolicyAlgorithm,
discrim=None,
*,
discrim_kwargs: Optional[Mapping] = None,
**kwargs,
):
"""Generative Adversarial Imitation Learning that accepts Image Obs
Most parameters are described in and passed to `AdversarialTrainer.__init__`.
Additional parameters that `CNNGAIL` adds on top of its superclass initializer are
as follows:
Args:
discrim_kwargs: Optional keyword arguments to use while constructing the
DiscrimNetGAIL.
"""
discrim_kwargs = discrim_kwargs or {}
        if discrim is None:
discrim = discrim_nets.DiscrimNetGAIL(
venv.observation_space,
venv.action_space,
discrim_net=ActObsCNN,
**discrim_kwargs,
)
logging.info("using CNN GAIL")
super().__init__(
venv, gen_algo, discrim, expert_data, expert_batch_size, **kwargs
)
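
# --------------------------------------------------------------------------------- #
# Usage sketch (not part of the original module; all names below are hypothetical).
# Assumes a vectorized image-observation env, pre-collected expert transitions
# (imitation.data.types.Transitions) and a stable-baselines3 on-policy learner:
#
#   from stable_baselines3 import PPO
#   from stable_baselines3.common.vec_env import DummyVecEnv
#
#   venv = DummyVecEnv([make_image_env])   # make_image_env: user-supplied env factory
#   gen_algo = PPO("CnnPolicy", venv)
#   trainer = CNNGAIL(
#       venv=venv,
#       expert_data=expert_transitions,
#       expert_batch_size=32,
#       gen_algo=gen_algo,
#   )
#   trainer.train(total_timesteps=100_000)  # train() is inherited from AdversarialTrainer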
| 28.963636
| 90
| 0.662272
|
import logging
from typing import Iterable, Mapping, Optional, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common import on_policy_algorithm, vec_env
from imitation.data import types
from imitation.rewards import discrim_nets
from imitation.algorithms.adversarial import AdversarialTrainer
from .cnn_discriminator import ActObsCNN
class CNNGAIL(AdversarialTrainer):
def __init__(
self,
venv: vec_env.VecEnv,
expert_data: Union[Iterable[Mapping], types.Transitions],
expert_batch_size: int,
gen_algo: on_policy_algorithm.OnPolicyAlgorithm,
discrim=None,
*,
discrim_kwargs: Optional[Mapping] = None,
**kwargs,
):
discrim_kwargs = discrim_kwargs or {}
        if discrim is None:
discrim = discrim_nets.DiscrimNetGAIL(
venv.observation_space,
venv.action_space,
discrim_net=ActObsCNN,
**discrim_kwargs,
)
logging.info("using CNN GAIL")
super().__init__(
venv, gen_algo, discrim, expert_data, expert_batch_size, **kwargs
)
| true
| true
|
7906e277f60157ae84e68f0902baffe877334ea5
| 4,220
|
py
|
Python
|
ucsmsdk/mometa/equipment/EquipmentHealthLed.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/equipment/EquipmentHealthLed.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/equipment/EquipmentHealthLed.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for EquipmentHealthLed ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentHealthLedConsts:
COLOR_AMBER = "amber"
COLOR_BLUE = "blue"
COLOR_GREEN = "green"
COLOR_RED = "red"
COLOR_UNKNOWN = "unknown"
HEALTH_LED_STATE_CRITICAL = "critical"
HEALTH_LED_STATE_MINOR = "minor"
HEALTH_LED_STATE_NORMAL = "normal"
OPER_STATE_BLINKING = "blinking"
OPER_STATE_ETH = "eth"
OPER_STATE_FC = "fc"
OPER_STATE_OFF = "off"
OPER_STATE_ON = "on"
OPER_STATE_UNKNOWN = "unknown"
OPER_STATE_UNSUPPORTED = "unsupported"
class EquipmentHealthLed(ManagedObject):
"""This is EquipmentHealthLed class."""
consts = EquipmentHealthLedConsts()
naming_props = set([])
mo_meta = MoMeta("EquipmentHealthLed", "equipmentHealthLed", "health-led", VersionMeta.Version212a, "InputOutput", 0x7f, [], ["admin", "pn-equipment", "pn-maintenance", "pn-policy"], [u'computeBlade', u'computeExtBoard', u'computeRackUnit', u'computeServerUnit', u'equipmentChassis', u'equipmentFanModule', u'equipmentFex', u'equipmentIOCard', u'equipmentPsu'], [u'computeHealthLedSensorAlarm', u'faultInst'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version212a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"color": MoPropertyMeta("color", "color", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["amber", "blue", "green", "red", "unknown"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"health_led_state": MoPropertyMeta("health_led_state", "healthLedState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["critical", "minor", "normal"], []),
"health_led_state_qualifier": MoPropertyMeta("health_led_state_qualifier", "healthLedStateQualifier", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["blinking", "eth", "fc", "off", "on", "unknown", "unsupported"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"color": "color",
"dn": "dn",
"healthLedState": "health_led_state",
"healthLedStateQualifier": "health_led_state_qualifier",
"id": "id",
"name": "name",
"operState": "oper_state",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.color = None
self.health_led_state = None
self.health_led_state_qualifier = None
self.id = None
self.name = None
self.oper_state = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "EquipmentHealthLed", parent_mo_or_dn, **kwargs)
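
# --------------------------------------------------------------------------------- #
# Usage sketch (not part of the generated module; assumes a reachable UCSM endpoint
# and valid credentials -- all values below are placeholders):
#
#   from ucsmsdk.ucshandle import UcsHandle
#
#   handle = UcsHandle("ucsm-host", "admin", "password")
#   handle.login()
#   for led in handle.query_classid("equipmentHealthLed"):
#       print(led.dn, led.color, led.health_led_state)
#   handle.logout()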
| 56.266667
| 422
| 0.662322
|
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentHealthLedConsts:
COLOR_AMBER = "amber"
COLOR_BLUE = "blue"
COLOR_GREEN = "green"
COLOR_RED = "red"
COLOR_UNKNOWN = "unknown"
HEALTH_LED_STATE_CRITICAL = "critical"
HEALTH_LED_STATE_MINOR = "minor"
HEALTH_LED_STATE_NORMAL = "normal"
OPER_STATE_BLINKING = "blinking"
OPER_STATE_ETH = "eth"
OPER_STATE_FC = "fc"
OPER_STATE_OFF = "off"
OPER_STATE_ON = "on"
OPER_STATE_UNKNOWN = "unknown"
OPER_STATE_UNSUPPORTED = "unsupported"
class EquipmentHealthLed(ManagedObject):
consts = EquipmentHealthLedConsts()
naming_props = set([])
mo_meta = MoMeta("EquipmentHealthLed", "equipmentHealthLed", "health-led", VersionMeta.Version212a, "InputOutput", 0x7f, [], ["admin", "pn-equipment", "pn-maintenance", "pn-policy"], [u'computeBlade', u'computeExtBoard', u'computeRackUnit', u'computeServerUnit', u'equipmentChassis', u'equipmentFanModule', u'equipmentFex', u'equipmentIOCard', u'equipmentPsu'], [u'computeHealthLedSensorAlarm', u'faultInst'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version212a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"color": MoPropertyMeta("color", "color", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["amber", "blue", "green", "red", "unknown"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"health_led_state": MoPropertyMeta("health_led_state", "healthLedState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["critical", "minor", "normal"], []),
"health_led_state_qualifier": MoPropertyMeta("health_led_state_qualifier", "healthLedStateQualifier", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["blinking", "eth", "fc", "off", "on", "unknown", "unsupported"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"color": "color",
"dn": "dn",
"healthLedState": "health_led_state",
"healthLedStateQualifier": "health_led_state_qualifier",
"id": "id",
"name": "name",
"operState": "oper_state",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.color = None
self.health_led_state = None
self.health_led_state_qualifier = None
self.id = None
self.name = None
self.oper_state = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "EquipmentHealthLed", parent_mo_or_dn, **kwargs)
| true
| true
|
7906e3f11f03d47490a17732decbb89245af9d54
| 6,397
|
py
|
Python
|
tests/testing_samples/mapping_example.py
|
leonardbinet/pandagg
|
5a5619e2190503da841e32782a4e55b35727d656
|
[
"MIT"
] | null | null | null |
tests/testing_samples/mapping_example.py
|
leonardbinet/pandagg
|
5a5619e2190503da841e32782a4e55b35727d656
|
[
"MIT"
] | null | null | null |
tests/testing_samples/mapping_example.py
|
leonardbinet/pandagg
|
5a5619e2190503da841e32782a4e55b35727d656
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
MAPPING = {
"dynamic": False,
"properties": {
"classification_type": {"type": "keyword"},
"date": {"type": "date", "format": "strict_date_optional_time||epoch_millis"},
"global_metrics": {
"dynamic": False,
"properties": {
"field": {
"dynamic": False,
"properties": {
"id": {"type": "integer"},
"name": {
"type": "text",
"fields": {
# subfield
"raw": {"type": "keyword"}
},
},
"type": {"type": "keyword"},
},
},
"dataset": {
"dynamic": False,
"properties": {
"nb_classes": {"type": "integer"},
"support_train": {"type": "integer"},
},
},
"performance": {
"dynamic": False,
"properties": {
"test": {
"dynamic": False,
"properties": {
"macro": {
"dynamic": False,
"properties": {
"f1_score": {"type": "float"},
"precision": {"type": "float"},
"recall": {"type": "float"},
},
},
"micro": {
"dynamic": False,
"properties": {
"f1_score": {"type": "float"},
"precision": {"type": "float"},
"recall": {"type": "float"},
},
},
},
}
},
},
},
},
"id": {"type": "keyword"},
"language": {"type": "keyword"},
"local_metrics": {
"type": "nested",
"dynamic": False,
"properties": {
"dataset": {
"dynamic": False,
"properties": {
"support_test": {"type": "integer"},
"support_train": {"type": "integer"},
},
},
"field_class": {
"dynamic": False,
"properties": {
"id": {"type": "integer"},
"name": {"type": "keyword"},
},
},
"performance": {
"dynamic": False,
"properties": {
"test": {
"dynamic": False,
"properties": {
"f1_score": {"type": "float"},
"precision": {"type": "float"},
"recall": {"type": "float"},
},
}
},
},
},
},
"workflow": {"type": "keyword"},
},
}
EXPECTED_MAPPING_REPR = """_
├── classification_type Keyword
├── date Date
├── global_metrics {Object}
│ ├── dataset {Object}
│ │ ├── nb_classes Integer
│ │ └── support_train Integer
│ ├── field {Object}
│ │ ├── id Integer
│ │ ├── name Text
│ │ │ └── raw ~ Keyword
│ │ └── type Keyword
│ └── performance {Object}
│ └── test {Object}
│ ├── macro {Object}
│ │ ├── f1_score Float
│ │ ├── precision Float
│ │ └── recall Float
│ └── micro {Object}
│ ├── f1_score Float
│ ├── precision Float
│ └── recall Float
├── id Keyword
├── language Keyword
├── local_metrics [Nested]
│ ├── dataset {Object}
│ │ ├── support_test Integer
│ │ └── support_train Integer
│ ├── field_class {Object}
│ │ ├── id Integer
│ │ └── name Keyword
│ └── performance {Object}
│ └── test {Object}
│ ├── f1_score Float
│ ├── precision Float
│ └── recall Float
└── workflow Keyword
"""
EXPECTED_MAPPING_TREE_REPR = """<Mapping>\n%s""" % EXPECTED_MAPPING_REPR
EXPECTED_CLIENT_BOUND_MAPPING_REPR = """<IMapping>\n%s""" % EXPECTED_MAPPING_REPR
| 45.368794
| 90
| 0.239487
|
MAPPING = {
"dynamic": False,
"properties": {
"classification_type": {"type": "keyword"},
"date": {"type": "date", "format": "strict_date_optional_time||epoch_millis"},
"global_metrics": {
"dynamic": False,
"properties": {
"field": {
"dynamic": False,
"properties": {
"id": {"type": "integer"},
"name": {
"type": "text",
"fields": {
"raw": {"type": "keyword"}
},
},
"type": {"type": "keyword"},
},
},
"dataset": {
"dynamic": False,
"properties": {
"nb_classes": {"type": "integer"},
"support_train": {"type": "integer"},
},
},
"performance": {
"dynamic": False,
"properties": {
"test": {
"dynamic": False,
"properties": {
"macro": {
"dynamic": False,
"properties": {
"f1_score": {"type": "float"},
"precision": {"type": "float"},
"recall": {"type": "float"},
},
},
"micro": {
"dynamic": False,
"properties": {
"f1_score": {"type": "float"},
"precision": {"type": "float"},
"recall": {"type": "float"},
},
},
},
}
},
},
},
},
"id": {"type": "keyword"},
"language": {"type": "keyword"},
"local_metrics": {
"type": "nested",
"dynamic": False,
"properties": {
"dataset": {
"dynamic": False,
"properties": {
"support_test": {"type": "integer"},
"support_train": {"type": "integer"},
},
},
"field_class": {
"dynamic": False,
"properties": {
"id": {"type": "integer"},
"name": {"type": "keyword"},
},
},
"performance": {
"dynamic": False,
"properties": {
"test": {
"dynamic": False,
"properties": {
"f1_score": {"type": "float"},
"precision": {"type": "float"},
"recall": {"type": "float"},
},
}
},
},
},
},
"workflow": {"type": "keyword"},
},
}
EXPECTED_MAPPING_REPR = """_
├── classification_type Keyword
├── date Date
├── global_metrics {Object}
│ ├── dataset {Object}
│ │ ├── nb_classes Integer
│ │ └── support_train Integer
│ ├── field {Object}
│ │ ├── id Integer
│ │ ├── name Text
│ │ │ └── raw ~ Keyword
│ │ └── type Keyword
│ └── performance {Object}
│ └── test {Object}
│ ├── macro {Object}
│ │ ├── f1_score Float
│ │ ├── precision Float
│ │ └── recall Float
│ └── micro {Object}
│ ├── f1_score Float
│ ├── precision Float
│ └── recall Float
├── id Keyword
├── language Keyword
├── local_metrics [Nested]
│ ├── dataset {Object}
│ │ ├── support_test Integer
│ │ └── support_train Integer
│ ├── field_class {Object}
│ │ ├── id Integer
│ │ └── name Keyword
│ └── performance {Object}
│ └── test {Object}
│ ├── f1_score Float
│ ├── precision Float
│ └── recall Float
└── workflow Keyword
"""
EXPECTED_MAPPING_TREE_REPR = """<Mapping>\n%s""" % EXPECTED_MAPPING_REPR
EXPECTED_CLIENT_BOUND_MAPPING_REPR = """<IMapping>\n%s""" % EXPECTED_MAPPING_REPR
| true
| true
|
7906e42edd58aae864814babfa54d8e8bff934f2
| 772
|
py
|
Python
|
Chapter03/file_start.py
|
JeffreyAsuncion/LearningPython
|
8242c3874ebb0f6a1e4cfd4ad845a9b42ffff0cc
|
[
"MIT"
] | null | null | null |
Chapter03/file_start.py
|
JeffreyAsuncion/LearningPython
|
8242c3874ebb0f6a1e4cfd4ad845a9b42ffff0cc
|
[
"MIT"
] | null | null | null |
Chapter03/file_start.py
|
JeffreyAsuncion/LearningPython
|
8242c3874ebb0f6a1e4cfd4ad845a9b42ffff0cc
|
[
"MIT"
] | null | null | null |
def main():
# Open a file for writing and create it if it doesn't exist
# myfile = open("textfile.txt", "w+")
# # Open the file for appending text to the end
# myfile = open("textfile.txt", "a+")
# # write some lines of data to the file
# for i in range(10):
# myfile.write("This is some new text\n")
# # close the file when done
# myfile.close()
# Open the file back up and read the contents
myfile = open("textfile.txt", "r")
if myfile.mode == 'r':
# contents = myfile.read()
# print(contents)
filelines = myfile.readlines()
for fileline in filelines:
print(fileline)
if __name__ == "__main__":
main()
| 25.733333
| 64
| 0.537565
|
def main():
# myfile = open("textfile.txt", "w+")
# # Open the file for appending text to the end
# myfile = open("textfile.txt", "a+")
# # write some lines of data to the file
# for i in range(10):
# myfile.write("This is some new text\n")
# # close the file when done
# myfile.close()
# Open the file back up and read the contents
myfile = open("textfile.txt", "r")
if myfile.mode == 'r':
# contents = myfile.read()
# print(contents)
filelines = myfile.readlines()
for fileline in filelines:
print(fileline)
if __name__ == "__main__":
main()
| true
| true
|
7906e55041dfad55e8531eb167030d717df9a61c
| 2,224
|
py
|
Python
|
tests/event_sourced_aggregates/test_raising_events_from_within_aggregates.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/event_sourced_aggregates/test_raising_events_from_within_aggregates.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/event_sourced_aggregates/test_raising_events_from_within_aggregates.py
|
mpsiva89/protean
|
315fa56da3f64178bbbf0edf1995af46d5eb3da7
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
from uuid import uuid4
import pytest
from protean import BaseCommandHandler, BaseEvent, BaseEventSourcedAggregate, handle
from protean.core.command import BaseCommand
from protean.core.event_sourced_aggregate import apply
from protean.fields import Identifier, String
from protean.globals import current_domain
from protean.utils import fqn
class Register(BaseCommand):
id = Identifier()
email = String()
name = String()
password_hash = String()
class Registered(BaseEvent):
id = Identifier()
email = String()
name = String()
password_hash = String()
class User(BaseEventSourcedAggregate):
email = String()
name = String()
password_hash = String()
@classmethod
def register(cls, command: Register) -> User:
user = cls(
id=command.id,
email=command.email,
name=command.name,
password_hash=command.password_hash,
)
user.raise_(
Registered(
id=command.id,
email=command.email,
name=command.name,
password_hash=command.password_hash,
)
)
current_domain.repository_for(User).add(user)
return user
@apply(Registered)
def registered(self, _: Registered) -> None:
pass
class UserCommandHandler(BaseCommandHandler):
@handle(Register)
def register_user(self, command: Register) -> None:
User.register(command)
@pytest.fixture(autouse=True)
def register_elements(test_domain):
test_domain.register(User)
test_domain.register(UserCommandHandler, aggregate_cls=User)
@pytest.mark.eventstore
def test_that_events_can_be_raised_from_within_aggregates(test_domain):
identifier = str(uuid4())
UserCommandHandler().register_user(
Register(
id=identifier,
email="john.doe@example.com",
name="John Doe",
password_hash="hash",
)
)
messages = test_domain.event_store.store._read("user")
assert len(messages) == 1
assert messages[0]["stream_name"] == f"user-{identifier}"
assert messages[0]["type"] == f"{fqn(Registered)}"
| 24.988764
| 84
| 0.657824
|
from __future__ import annotations
from uuid import uuid4
import pytest
from protean import BaseCommandHandler, BaseEvent, BaseEventSourcedAggregate, handle
from protean.core.command import BaseCommand
from protean.core.event_sourced_aggregate import apply
from protean.fields import Identifier, String
from protean.globals import current_domain
from protean.utils import fqn
class Register(BaseCommand):
id = Identifier()
email = String()
name = String()
password_hash = String()
class Registered(BaseEvent):
id = Identifier()
email = String()
name = String()
password_hash = String()
class User(BaseEventSourcedAggregate):
email = String()
name = String()
password_hash = String()
@classmethod
def register(cls, command: Register) -> User:
user = cls(
id=command.id,
email=command.email,
name=command.name,
password_hash=command.password_hash,
)
user.raise_(
Registered(
id=command.id,
email=command.email,
name=command.name,
password_hash=command.password_hash,
)
)
current_domain.repository_for(User).add(user)
return user
@apply(Registered)
def registered(self, _: Registered) -> None:
pass
class UserCommandHandler(BaseCommandHandler):
@handle(Register)
def register_user(self, command: Register) -> None:
User.register(command)
@pytest.fixture(autouse=True)
def register_elements(test_domain):
test_domain.register(User)
test_domain.register(UserCommandHandler, aggregate_cls=User)
@pytest.mark.eventstore
def test_that_events_can_be_raised_from_within_aggregates(test_domain):
identifier = str(uuid4())
UserCommandHandler().register_user(
Register(
id=identifier,
email="john.doe@example.com",
name="John Doe",
password_hash="hash",
)
)
messages = test_domain.event_store.store._read("user")
assert len(messages) == 1
assert messages[0]["stream_name"] == f"user-{identifier}"
assert messages[0]["type"] == f"{fqn(Registered)}"
| true
| true
|
7906e811392f0d2a66942e3722bc905f36053fcd
| 1,186
|
py
|
Python
|
readthedocs/projects/signals.py
|
ank-forked/readthedocs.org
|
e4110e8db5d25b7e6c699dd2df1a580b04ee8d16
|
[
"MIT"
] | 1
|
2019-10-16T07:33:37.000Z
|
2019-10-16T07:33:37.000Z
|
readthedocs/projects/signals.py
|
ank-forked/readthedocs.org
|
e4110e8db5d25b7e6c699dd2df1a580b04ee8d16
|
[
"MIT"
] | 4
|
2021-02-08T21:06:49.000Z
|
2021-12-13T20:51:17.000Z
|
readthedocs/projects/signals.py
|
ank-forked/readthedocs.org
|
e4110e8db5d25b7e6c699dd2df1a580b04ee8d16
|
[
"MIT"
] | 3
|
2016-08-04T12:53:13.000Z
|
2016-11-02T14:17:55.000Z
|
"""Project signals"""
import logging
import django.dispatch
from django.contrib import messages
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from readthedocs.oauth.services import registry
before_vcs = django.dispatch.Signal(providing_args=["version"])
after_vcs = django.dispatch.Signal(providing_args=["version"])
before_build = django.dispatch.Signal(providing_args=["version"])
after_build = django.dispatch.Signal(providing_args=["version"])
project_import = django.dispatch.Signal(providing_args=["project"])
log = logging.getLogger(__name__)
@receiver(project_import)
def handle_project_import(sender, **kwargs):
"""Add post-commit hook on project import"""
project = sender
request = kwargs.get('request')
for service_cls in registry:
if service_cls.is_project_service(project):
service = service_cls.for_user(request.user)
if service is not None:
if service.setup_webhook(project):
messages.success(request, _('Webhook activated'))
else:
messages.error(request, _('Webhook configuration failed'))
| 30.410256
| 78
| 0.716695
|
import logging
import django.dispatch
from django.contrib import messages
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from readthedocs.oauth.services import registry
before_vcs = django.dispatch.Signal(providing_args=["version"])
after_vcs = django.dispatch.Signal(providing_args=["version"])
before_build = django.dispatch.Signal(providing_args=["version"])
after_build = django.dispatch.Signal(providing_args=["version"])
project_import = django.dispatch.Signal(providing_args=["project"])
log = logging.getLogger(__name__)
@receiver(project_import)
def handle_project_import(sender, **kwargs):
project = sender
request = kwargs.get('request')
for service_cls in registry:
if service_cls.is_project_service(project):
service = service_cls.for_user(request.user)
if service is not None:
if service.setup_webhook(project):
messages.success(request, _('Webhook activated'))
else:
messages.error(request, _('Webhook configuration failed'))
| true
| true
|
7906e813c9cc3460deeb3dd0d5bd2171e48fde29
| 30,056
|
py
|
Python
|
yt_dlp/extractor/facebook.py
|
RobinD42/yt-dlc
|
aae273ded871caac1995381033a5b7ecaf4a526b
|
[
"Unlicense"
] | null | null | null |
yt_dlp/extractor/facebook.py
|
RobinD42/yt-dlc
|
aae273ded871caac1995381033a5b7ecaf4a526b
|
[
"Unlicense"
] | null | null | null |
yt_dlp/extractor/facebook.py
|
RobinD42/yt-dlc
|
aae273ded871caac1995381033a5b7ecaf4a526b
|
[
"Unlicense"
] | 1
|
2021-09-10T18:22:00.000Z
|
2021-09-10T18:22:00.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_id,
int_or_none,
js_to_json,
limit_length,
parse_count,
qualities,
sanitized_Request,
try_get,
urlencode_postdata,
urljoin,
)
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://
(?:[\w-]+\.)?(?:facebook\.com|facebookcorewwwi\.onion)/
(?:[^#]*?\#!/)?
(?:
(?:
video/video\.php|
photo\.php|
video\.php|
video/embed|
story\.php|
watch(?:/live)?/?
)\?(?:.*?)(?:v|video_id|story_fbid)=|
[^/]+/videos/(?:[^/]+/)?|
[^/]+/posts/|
groups/[^/]+/permalink/|
watchparty/
)|
facebook:
)
(?P<id>[0-9]+)
'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'
_VIDEO_PAGE_TAHOE_TEMPLATE = 'https://www.facebook.com/video/tahoe/async/%s/?chain=true&isvideo=true&payloadtype=primary'
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
'uploader': 'Tennis on Facebook',
'upload_date': '20140908',
'timestamp': 1410199200,
},
'skip': 'Requires logging in',
}, {
# data.video
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 're:^Asif Nawab Butt posted a video',
'uploader': 'Asif Nawab Butt',
'upload_date': '20140506',
'timestamp': 1399398998,
'thumbnail': r're:^https?://.*',
},
'expected_warnings': [
'title'
]
}, {
'note': 'Video with DASH manifest',
'url': 'https://www.facebook.com/video.php?v=957955867617029',
'md5': 'b2c28d528273b323abe5c6ab59f0f030',
'info_dict': {
'id': '957955867617029',
'ext': 'mp4',
'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
'uploader': 'Demy de Zeeuw',
'upload_date': '20160110',
'timestamp': 1452431627,
},
'skip': 'Requires logging in',
}, {
'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
'info_dict': {
'id': '544765982287235',
'ext': 'mp4',
'title': '"What are you doing running in the snow?"',
'uploader': 'FailArmy',
},
'skip': 'Video gone',
}, {
'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
'info_dict': {
'id': '1035862816472149',
'ext': 'mp4',
'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog',
'uploader': 'S. Saint',
},
'skip': 'Video gone',
}, {
'note': 'swf params escaped',
'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
'md5': '97ba073838964d12c70566e0085c2b91',
'info_dict': {
'id': '10153664894881749',
'ext': 'mp4',
'title': 'Average time to confirm recent Supreme Court nominees: 67 days Longest it\'s t...',
'thumbnail': r're:^https?://.*',
'timestamp': 1456259628,
'upload_date': '20160223',
'uploader': 'Barack Obama',
},
}, {
# have 1080P, but only up to 720p in swf params
# data.video.story.attachments[].media
'url': 'https://www.facebook.com/cnn/videos/10155529876156509/',
'md5': '9571fae53d4165bbbadb17a94651dcdc',
'info_dict': {
'id': '10155529876156509',
'ext': 'mp4',
'title': 'She survived the holocaust — and years later, she’s getting her citizenship s...',
'timestamp': 1477818095,
'upload_date': '20161030',
'uploader': 'CNN',
'thumbnail': r're:^https?://.*',
'view_count': int,
},
}, {
# bigPipe.onPageletArrive ... onPageletArrive pagelet_group_mall
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/yaroslav.korpan/videos/1417995061575415/',
'info_dict': {
'id': '1417995061575415',
'ext': 'mp4',
'title': 'md5:1db063d6a8c13faa8da727817339c857',
'timestamp': 1486648217,
'upload_date': '20170209',
'uploader': 'Yaroslav Korpan',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/LaGuiaDelVaron/posts/1072691702860471',
'info_dict': {
'id': '1072691702860471',
'ext': 'mp4',
'title': 'md5:ae2d22a93fbb12dad20dc393a869739d',
'timestamp': 1477305000,
'upload_date': '20161024',
'uploader': 'La Guía Del Varón',
'thumbnail': r're:^https?://.*',
},
'params': {
'skip_download': True,
},
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/1024490957622648/permalink/1396382447100162/',
'info_dict': {
'id': '1396382447100162',
'ext': 'mp4',
'title': 'md5:19a428bbde91364e3de815383b54a235',
'timestamp': 1486035494,
'upload_date': '20170202',
'uploader': 'Elisabeth Ahtn',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
'only_matching': True,
}, {
# data.mediaset.currMedia.edges
'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
'only_matching': True,
}, {
# data.video.story.attachments[].media
'url': 'facebook:544765982287235',
'only_matching': True,
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/164828000315060/permalink/764967300301124/',
'only_matching': True,
}, {
# data.video.creation_story.attachments[].media
'url': 'https://zh-hk.facebook.com/peoplespower/videos/1135894589806027/',
'only_matching': True,
}, {
# data.video
'url': 'https://www.facebookcorewwwi.onion/video.php?v=274175099429670',
'only_matching': True,
}, {
# no title
'url': 'https://www.facebook.com/onlycleverentertainment/videos/1947995502095005/',
'only_matching': True,
}, {
# data.video
'url': 'https://www.facebook.com/WatchESLOne/videos/359649331226507/',
'info_dict': {
'id': '359649331226507',
'ext': 'mp4',
'title': '#ESLOne VoD - Birmingham Finals Day#1 Fnatic vs. @Evil Geniuses',
'uploader': 'ESL One Dota 2',
},
'params': {
'skip_download': True,
},
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media
'url': 'https://www.facebook.com/100033620354545/videos/106560053808006/',
'info_dict': {
'id': '106560053808006',
},
'playlist_count': 2,
}, {
# data.video.story.attachments[].media
'url': 'https://www.facebook.com/watch/?v=647537299265662',
'only_matching': True,
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media
'url': 'https://www.facebook.com/PankajShahLondon/posts/10157667649866271',
'info_dict': {
'id': '10157667649866271',
},
'playlist_count': 3,
}, {
# data.nodes[].comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://m.facebook.com/Alliance.Police.Department/posts/4048563708499330',
'info_dict': {
'id': '117576630041613',
'ext': 'mp4',
# TODO: title can be extracted from video page
'title': 'Facebook video #117576630041613',
'uploader_id': '189393014416438',
'upload_date': '20201123',
'timestamp': 1606162592,
},
'skip': 'Requires logging in',
}, {
# node.comet_sections.content.story.attached_story.attachments.style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/ateistiskselskab/permalink/10154930137678856/',
'info_dict': {
'id': '211567722618337',
'ext': 'mp4',
'title': 'Facebook video #211567722618337',
'uploader_id': '127875227654254',
'upload_date': '20161122',
'timestamp': 1479793574,
},
}, {
# data.video.creation_story.attachments[].media
'url': 'https://www.facebook.com/watch/live/?v=1823658634322275',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/watchparty/211641140192478',
'info_dict': {
'id': '211641140192478',
},
'playlist_count': 1,
'skip': 'Requires logging in',
}]
_SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
_api_config = {
'graphURI': '/api/graphql/'
}
@staticmethod
def _extract_urls(webpage):
urls = []
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
webpage):
urls.append(mobj.group('url'))
# Facebook API embed
# see https://developers.facebook.com/docs/plugins/embedded-video-player
for mobj in re.finditer(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+
data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''', webpage):
urls.append(mobj.group('url'))
return urls
def _login(self):
useremail, password = self._get_login_info()
if useremail is None:
return
login_page_req = sanitized_Request(self._LOGIN_URL)
self._set_cookie('facebook.com', 'locale', 'en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
lsd = self._search_regex(
r'<input type="hidden" name="lsd" value="([^"]*)"',
login_page, 'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')
login_form = {
'email': useremail,
'pass': password,
'lsd': lsd,
'lgnrnd': lgnrnd,
'next': 'http://facebook.com/home.php',
'default_persistent': '0',
'legacy_return': '1',
'timezone': '-60',
'trynum': '1',
}
request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
error = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
login_results, 'login error', default=None, group='error')
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
fb_dtsg = self._search_regex(
r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
h = self._search_regex(
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)
if not fb_dtsg or not h:
return
check_form = {
'fb_dtsg': fb_dtsg,
'h': h,
'name_action_selected': 'dont_save',
}
check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
return
def _real_initialize(self):
self._login()
def _extract_from_url(self, url, video_id):
webpage = self._download_webpage(
url.replace('://m.facebook.com/', '://www.facebook.com/'), video_id)
video_data = None
def extract_video_data(instances):
video_data = []
for item in instances:
if try_get(item, lambda x: x[1][0]) == 'VideoConfig':
video_item = item[2][0]
if video_item.get('video_id'):
video_data.append(video_item['videoData'])
return video_data
server_js_data = self._parse_json(self._search_regex(
[r'handleServerJS\(({.+})(?:\);|,")', r'\bs\.handle\(({.+?})\);'],
webpage, 'server js data', default='{}'), video_id, fatal=False)
if server_js_data:
video_data = extract_video_data(server_js_data.get('instances', []))
def extract_from_jsmods_instances(js_data):
if js_data:
return extract_video_data(try_get(
js_data, lambda x: x['jsmods']['instances'], list) or [])
def extract_dash_manifest(video, formats):
dash_manifest = video.get('dash_manifest')
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
def process_formats(formats):
# Downloads with browser's User-Agent are rate limited. Working around
# with non-browser User-Agent.
for f in formats:
f.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1'
self._sort_formats(formats)
def extract_relay_data(_filter):
return self._parse_json(self._search_regex(
r'handleWithCustomApplyEach\([^,]+,\s*({.*?%s.*?})\);' % _filter,
webpage, 'replay data', default='{}'), video_id, fatal=False) or {}
def extract_relay_prefetched_data(_filter):
replay_data = extract_relay_data(_filter)
for require in (replay_data.get('require') or []):
if require[0] == 'RelayPrefetchedStreamCache':
return try_get(require, lambda x: x[3][1]['__bbox']['result']['data'], dict) or {}
if not video_data:
server_js_data = self._parse_json(self._search_regex([
r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+' + self._SUPPORTED_PAGLETS_REGEX,
r'bigPipe\.onPageletArrive\(({.*?id\s*:\s*"%s".*?})\);' % self._SUPPORTED_PAGLETS_REGEX
], webpage, 'js data', default='{}'), video_id, js_to_json, False)
video_data = extract_from_jsmods_instances(server_js_data)
if not video_data:
data = extract_relay_prefetched_data(
r'"(?:dash_manifest|playable_url(?:_quality_hd)?)"\s*:\s*"[^"]+"')
if data:
entries = []
def parse_graphql_video(video):
formats = []
q = qualities(['sd', 'hd'])
for (suffix, format_id) in [('', 'sd'), ('_quality_hd', 'hd')]:
playable_url = video.get('playable_url' + suffix)
if not playable_url:
continue
formats.append({
'format_id': format_id,
'quality': q(format_id),
'url': playable_url,
})
extract_dash_manifest(video, formats)
process_formats(formats)
v_id = video.get('videoId') or video.get('id') or video_id
info = {
'id': v_id,
'formats': formats,
'thumbnail': try_get(video, lambda x: x['thumbnailImage']['uri']),
'uploader_id': try_get(video, lambda x: x['owner']['id']),
'timestamp': int_or_none(video.get('publish_time')),
'duration': float_or_none(video.get('playable_duration_in_ms'), 1000),
}
description = try_get(video, lambda x: x['savable_description']['text'])
title = video.get('name')
if title:
info.update({
'title': title,
'description': description,
})
else:
info['title'] = description or 'Facebook video #%s' % v_id
entries.append(info)
def parse_attachment(attachment, key='media'):
media = attachment.get(key) or {}
if media.get('__typename') == 'Video':
return parse_graphql_video(media)
nodes = data.get('nodes') or []
node = data.get('node') or {}
if not nodes and node:
nodes.append(node)
for node in nodes:
story = try_get(node, lambda x: x['comet_sections']['content']['story'], dict) or {}
attachments = try_get(story, [
lambda x: x['attached_story']['attachments'],
lambda x: x['attachments']
], list) or []
for attachment in attachments:
attachment = try_get(attachment, lambda x: x['style_type_renderer']['attachment'], dict)
ns = try_get(attachment, lambda x: x['all_subattachments']['nodes'], list) or []
for n in ns:
parse_attachment(n)
parse_attachment(attachment)
edges = try_get(data, lambda x: x['mediaset']['currMedia']['edges'], list) or []
for edge in edges:
parse_attachment(edge, key='node')
video = data.get('video') or {}
if video:
attachments = try_get(video, [
lambda x: x['story']['attachments'],
lambda x: x['creation_story']['attachments']
], list) or []
for attachment in attachments:
parse_attachment(attachment)
if not entries:
parse_graphql_video(video)
return self.playlist_result(entries, video_id)
if not video_data:
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
'The video is not available, Facebook said: "%s"' % m_msg.group(1),
expected=True)
elif '>You must log in to continue' in webpage:
self.raise_login_required()
if not video_data and '/watchparty/' in url:
post_data = {
'doc_id': 3731964053542869,
'variables': json.dumps({
'livingRoomID': video_id,
}),
}
prefetched_data = extract_relay_prefetched_data(r'"login_data"\s*:\s*{')
if prefetched_data:
lsd = try_get(prefetched_data, lambda x: x['login_data']['lsd'], dict)
if lsd:
post_data[lsd['name']] = lsd['value']
relay_data = extract_relay_data(r'\[\s*"RelayAPIConfigDefaults"\s*,')
for define in (relay_data.get('define') or []):
if define[0] == 'RelayAPIConfigDefaults':
self._api_config = define[2]
living_room = self._download_json(
urljoin(url, self._api_config['graphURI']), video_id,
data=urlencode_postdata(post_data))['data']['living_room']
entries = []
for edge in (try_get(living_room, lambda x: x['recap']['watched_content']['edges']) or []):
video = try_get(edge, lambda x: x['node']['video']) or {}
v_id = video.get('id')
if not v_id:
continue
v_id = compat_str(v_id)
entries.append(self.url_result(
self._VIDEO_PAGE_TEMPLATE % v_id,
self.ie_key(), v_id, video.get('name')))
return self.playlist_result(entries, video_id)
if not video_data:
# Video info not in first request, do a secondary request using
# tahoe player specific URL
tahoe_data = self._download_webpage(
self._VIDEO_PAGE_TAHOE_TEMPLATE % video_id, video_id,
data=urlencode_postdata({
'__a': 1,
'__pc': self._search_regex(
r'pkg_cohort["\']\s*:\s*["\'](.+?)["\']', webpage,
'pkg cohort', default='PHASED:DEFAULT'),
'__rev': self._search_regex(
r'client_revision["\']\s*:\s*(\d+),', webpage,
'client revision', default='3944515'),
'fb_dtsg': self._search_regex(
r'"DTSGInitialData"\s*,\s*\[\]\s*,\s*{\s*"token"\s*:\s*"([^"]+)"',
webpage, 'dtsg token', default=''),
}),
headers={
'Content-Type': 'application/x-www-form-urlencoded',
})
tahoe_js_data = self._parse_json(
self._search_regex(
r'for\s+\(\s*;\s*;\s*\)\s*;(.+)', tahoe_data,
'tahoe js data', default='{}'),
video_id, fatal=False)
video_data = extract_from_jsmods_instances(tahoe_js_data)
if not video_data:
raise ExtractorError('Cannot parse data')
if len(video_data) > 1:
entries = []
for v in video_data:
video_url = v[0].get('video_url')
if not video_url:
continue
entries.append(self.url_result(urljoin(
url, video_url), self.ie_key(), v[0].get('video_id')))
return self.playlist_result(entries, video_id)
video_data = video_data[0]
formats = []
subtitles = {}
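        # Each entry of video_data describes one stream type; collect its SD/HD sources,
        # any DASH manifest and subtitle tracks.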
for f in video_data:
format_id = f['stream_type']
if f and isinstance(f, dict):
f = [f]
if not f or not isinstance(f, list):
continue
for quality in ('sd', 'hd'):
for src_type in ('src', 'src_no_ratelimit'):
src = f[0].get('%s_%s' % (quality, src_type))
if src:
preference = -10 if format_id == 'progressive' else 0
if quality == 'hd':
preference += 5
formats.append({
'format_id': '%s_%s_%s' % (format_id, quality, src_type),
'url': src,
'quality': preference,
})
extract_dash_manifest(f[0], formats)
subtitles_src = f[0].get('subtitles_src')
if subtitles_src:
subtitles.setdefault('en', []).append({'url': subtitles_src})
if not formats:
raise ExtractorError('Cannot find video formats')
process_formats(formats)
video_title = self._html_search_regex(
r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage,
'title', default=None)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', default=None)
if not video_title:
video_title = self._html_search_meta(
'description', webpage, 'title', default=None)
if video_title:
video_title = limit_length(video_title, 80)
else:
video_title = 'Facebook video #%s' % video_id
uploader = clean_html(get_element_by_id(
'fbPhotoPageAuthorName', webpage)) or self._search_regex(
r'ownerName\s*:\s*"([^"]+)"', webpage, 'uploader',
default=None) or self._og_search_title(webpage, fatal=False)
timestamp = int_or_none(self._search_regex(
r'<abbr[^>]+data-utime=["\'](\d+)', webpage,
'timestamp', default=None))
thumbnail = self._html_search_meta(['og:image', 'twitter:image'], webpage)
view_count = parse_count(self._search_regex(
r'\bviewCount\s*:\s*["\']([\d,.]+)', webpage, 'view count',
default=None))
info_dict = {
'id': video_id,
'title': video_title,
'formats': formats,
'uploader': uploader,
'timestamp': timestamp,
'thumbnail': thumbnail,
'view_count': view_count,
'subtitles': subtitles,
}
return info_dict
def _real_extract(self, url):
video_id = self._match_id(url)
real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
return self._extract_from_url(real_url, video_id)
class FacebookPluginsVideoIE(InfoExtractor):
_VALID_URL = r'https?://(?:[\w-]+\.)?facebook\.com/plugins/video\.php\?.*?\bhref=(?P<id>https.+)'
_TESTS = [{
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fgov.sg%2Fvideos%2F10154383743583686%2F&show_text=0&width=560',
'md5': '5954e92cdfe51fe5782ae9bda7058a07',
'info_dict': {
'id': '10154383743583686',
'ext': 'mp4',
'title': 'What to do during the haze?',
'uploader': 'Gov.sg',
'upload_date': '20160826',
'timestamp': 1472184808,
},
'add_ie': [FacebookIE.ie_key()],
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fvideo.php%3Fv%3D10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https://www.facebook.com/gov.sg/videos/10154383743583686/&show_text=0&width=560',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
compat_urllib_parse_unquote(self._match_id(url)),
FacebookIE.ie_key())
| 42.512023
| 159
| 0.523756
|
from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_id,
int_or_none,
js_to_json,
limit_length,
parse_count,
qualities,
sanitized_Request,
try_get,
urlencode_postdata,
urljoin,
)
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://
(?:[\w-]+\.)?(?:facebook\.com|facebookcorewwwi\.onion)/
(?:[^#]*?\#!/)?
(?:
(?:
video/video\.php|
photo\.php|
video\.php|
video/embed|
story\.php|
watch(?:/live)?/?
)\?(?:.*?)(?:v|video_id|story_fbid)=|
[^/]+/videos/(?:[^/]+/)?|
[^/]+/posts/|
groups/[^/]+/permalink/|
watchparty/
)|
facebook:
)
(?P<id>[0-9]+)
'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'
_VIDEO_PAGE_TAHOE_TEMPLATE = 'https://www.facebook.com/video/tahoe/async/%s/?chain=true&isvideo=true&payloadtype=primary'
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
'uploader': 'Tennis on Facebook',
'upload_date': '20140908',
'timestamp': 1410199200,
},
'skip': 'Requires logging in',
}, {
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 're:^Asif Nawab Butt posted a video',
'uploader': 'Asif Nawab Butt',
'upload_date': '20140506',
'timestamp': 1399398998,
'thumbnail': r're:^https?://.*',
},
'expected_warnings': [
'title'
]
}, {
'note': 'Video with DASH manifest',
'url': 'https://www.facebook.com/video.php?v=957955867617029',
'md5': 'b2c28d528273b323abe5c6ab59f0f030',
'info_dict': {
'id': '957955867617029',
'ext': 'mp4',
'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
'uploader': 'Demy de Zeeuw',
'upload_date': '20160110',
'timestamp': 1452431627,
},
'skip': 'Requires logging in',
}, {
'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
'info_dict': {
'id': '544765982287235',
'ext': 'mp4',
'title': '"What are you doing running in the snow?"',
'uploader': 'FailArmy',
},
'skip': 'Video gone',
}, {
'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
'info_dict': {
'id': '1035862816472149',
'ext': 'mp4',
'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog',
'uploader': 'S. Saint',
},
'skip': 'Video gone',
}, {
'note': 'swf params escaped',
'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
'md5': '97ba073838964d12c70566e0085c2b91',
'info_dict': {
'id': '10153664894881749',
'ext': 'mp4',
'title': 'Average time to confirm recent Supreme Court nominees: 67 days Longest it\'s t...',
'thumbnail': r're:^https?://.*',
'timestamp': 1456259628,
'upload_date': '20160223',
'uploader': 'Barack Obama',
},
}, {
# have 1080P, but only up to 720p in swf params
# data.video.story.attachments[].media
'url': 'https://www.facebook.com/cnn/videos/10155529876156509/',
'md5': '9571fae53d4165bbbadb17a94651dcdc',
'info_dict': {
'id': '10155529876156509',
'ext': 'mp4',
'title': 'She survived the holocaust — and years later, she’s getting her citizenship s...',
'timestamp': 1477818095,
'upload_date': '20161030',
'uploader': 'CNN',
'thumbnail': r're:^https?://.*',
'view_count': int,
},
}, {
# bigPipe.onPageletArrive ... onPageletArrive pagelet_group_mall
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/yaroslav.korpan/videos/1417995061575415/',
'info_dict': {
'id': '1417995061575415',
'ext': 'mp4',
'title': 'md5:1db063d6a8c13faa8da727817339c857',
'timestamp': 1486648217,
'upload_date': '20170209',
'uploader': 'Yaroslav Korpan',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/LaGuiaDelVaron/posts/1072691702860471',
'info_dict': {
'id': '1072691702860471',
'ext': 'mp4',
'title': 'md5:ae2d22a93fbb12dad20dc393a869739d',
'timestamp': 1477305000,
'upload_date': '20161024',
'uploader': 'La Guía Del Varón',
'thumbnail': r're:^https?://.*',
},
'params': {
'skip_download': True,
},
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/1024490957622648/permalink/1396382447100162/',
'info_dict': {
'id': '1396382447100162',
'ext': 'mp4',
'title': 'md5:19a428bbde91364e3de815383b54a235',
'timestamp': 1486035494,
'upload_date': '20170202',
'uploader': 'Elisabeth Ahtn',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
'only_matching': True,
}, {
# data.mediaset.currMedia.edges
'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
'only_matching': True,
}, {
# data.video.story.attachments[].media
'url': 'facebook:544765982287235',
'only_matching': True,
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/164828000315060/permalink/764967300301124/',
'only_matching': True,
}, {
# data.video.creation_story.attachments[].media
'url': 'https://zh-hk.facebook.com/peoplespower/videos/1135894589806027/',
'only_matching': True,
}, {
# data.video
'url': 'https://www.facebookcorewwwi.onion/video.php?v=274175099429670',
'only_matching': True,
}, {
# no title
'url': 'https://www.facebook.com/onlycleverentertainment/videos/1947995502095005/',
'only_matching': True,
}, {
# data.video
'url': 'https://www.facebook.com/WatchESLOne/videos/359649331226507/',
'info_dict': {
'id': '359649331226507',
'ext': 'mp4',
'title': 'One Dota 2',
},
'params': {
'skip_download': True,
},
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media
'url': 'https://www.facebook.com/100033620354545/videos/106560053808006/',
'info_dict': {
'id': '106560053808006',
},
'playlist_count': 2,
}, {
# data.video.story.attachments[].media
'url': 'https://www.facebook.com/watch/?v=647537299265662',
'only_matching': True,
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media
'url': 'https://www.facebook.com/PankajShahLondon/posts/10157667649866271',
'info_dict': {
'id': '10157667649866271',
},
'playlist_count': 3,
}, {
# data.nodes[].comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://m.facebook.com/Alliance.Police.Department/posts/4048563708499330',
'info_dict': {
'id': '117576630041613',
'ext': 'mp4',
# TODO: title can be extracted from video page
            'title': 'Facebook video #117576630041613',
'uploader_id': '189393014416438',
'upload_date': '20201123',
'timestamp': 1606162592,
},
'skip': 'Requires logging in',
}, {
# node.comet_sections.content.story.attached_story.attachments.style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/ateistiskselskab/permalink/10154930137678856/',
'info_dict': {
'id': '211567722618337',
'ext': 'mp4',
            'title': 'Facebook video #211567722618337',
'uploader_id': '127875227654254',
'upload_date': '20161122',
'timestamp': 1479793574,
},
}, {
# data.video.creation_story.attachments[].media
'url': 'https://www.facebook.com/watch/live/?v=1823658634322275',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/watchparty/211641140192478',
'info_dict': {
'id': '211641140192478',
},
'playlist_count': 1,
'skip': 'Requires logging in',
}]
_SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
_api_config = {
'graphURI': '/api/graphql/'
}
@staticmethod
def _extract_urls(webpage):
urls = []
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
webpage):
urls.append(mobj.group('url'))
# Facebook API embed
# see https://developers.facebook.com/docs/plugins/embedded-video-player
for mobj in re.finditer(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+
data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''', webpage):
urls.append(mobj.group('url'))
return urls
def _login(self):
useremail, password = self._get_login_info()
if useremail is None:
return
login_page_req = sanitized_Request(self._LOGIN_URL)
self._set_cookie('facebook.com', 'locale', 'en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
lsd = self._search_regex(
r'<input type="hidden" name="lsd" value="([^"]*)"',
login_page, 'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')
login_form = {
'email': useremail,
'pass': password,
'lsd': lsd,
'lgnrnd': lgnrnd,
'next': 'http://facebook.com/home.php',
'default_persistent': '0',
'legacy_return': '1',
'timezone': '-60',
'trynum': '1',
}
request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
error = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
login_results, 'login error', default=None, group='error')
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
fb_dtsg = self._search_regex(
r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
h = self._search_regex(
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)
if not fb_dtsg or not h:
return
check_form = {
'fb_dtsg': fb_dtsg,
'h': h,
'name_action_selected': 'dont_save',
}
check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
return
def _real_initialize(self):
self._login()
def _extract_from_url(self, url, video_id):
webpage = self._download_webpage(
url.replace('://m.facebook.com/', '://www.facebook.com/'), video_id)
video_data = None
def extract_video_data(instances):
video_data = []
for item in instances:
if try_get(item, lambda x: x[1][0]) == 'VideoConfig':
video_item = item[2][0]
if video_item.get('video_id'):
video_data.append(video_item['videoData'])
return video_data
server_js_data = self._parse_json(self._search_regex(
[r'handleServerJS\(({.+})(?:\);|,")', r'\bs\.handle\(({.+?})\);'],
webpage, 'server js data', default='{}'), video_id, fatal=False)
if server_js_data:
video_data = extract_video_data(server_js_data.get('instances', []))
def extract_from_jsmods_instances(js_data):
if js_data:
return extract_video_data(try_get(
js_data, lambda x: x['jsmods']['instances'], list) or [])
def extract_dash_manifest(video, formats):
dash_manifest = video.get('dash_manifest')
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
def process_formats(formats):
# Downloads with browser's User-Agent are rate limited. Working around
# with non-browser User-Agent.
for f in formats:
f.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1'
self._sort_formats(formats)
def extract_relay_data(_filter):
return self._parse_json(self._search_regex(
r'handleWithCustomApplyEach\([^,]+,\s*({.*?%s.*?})\);' % _filter,
webpage, 'replay data', default='{}'), video_id, fatal=False) or {}
def extract_relay_prefetched_data(_filter):
replay_data = extract_relay_data(_filter)
for require in (replay_data.get('require') or []):
if require[0] == 'RelayPrefetchedStreamCache':
return try_get(require, lambda x: x[3][1]['__bbox']['result']['data'], dict) or {}
if not video_data:
server_js_data = self._parse_json(self._search_regex([
r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+' + self._SUPPORTED_PAGLETS_REGEX,
r'bigPipe\.onPageletArrive\(({.*?id\s*:\s*"%s".*?})\);' % self._SUPPORTED_PAGLETS_REGEX
], webpage, 'js data', default='{}'), video_id, js_to_json, False)
video_data = extract_from_jsmods_instances(server_js_data)
if not video_data:
data = extract_relay_prefetched_data(
r'"(?:dash_manifest|playable_url(?:_quality_hd)?)"\s*:\s*"[^"]+"')
if data:
entries = []
def parse_graphql_video(video):
formats = []
q = qualities(['sd', 'hd'])
for (suffix, format_id) in [('', 'sd'), ('_quality_hd', 'hd')]:
playable_url = video.get('playable_url' + suffix)
if not playable_url:
continue
formats.append({
'format_id': format_id,
'quality': q(format_id),
'url': playable_url,
})
extract_dash_manifest(video, formats)
process_formats(formats)
v_id = video.get('videoId') or video.get('id') or video_id
info = {
'id': v_id,
'formats': formats,
'thumbnail': try_get(video, lambda x: x['thumbnailImage']['uri']),
'uploader_id': try_get(video, lambda x: x['owner']['id']),
'timestamp': int_or_none(video.get('publish_time')),
'duration': float_or_none(video.get('playable_duration_in_ms'), 1000),
}
description = try_get(video, lambda x: x['savable_description']['text'])
title = video.get('name')
if title:
info.update({
'title': title,
'description': description,
})
else:
info['title'] = description or 'Facebook video #%s' % v_id
entries.append(info)
def parse_attachment(attachment, key='media'):
media = attachment.get(key) or {}
if media.get('__typename') == 'Video':
return parse_graphql_video(media)
nodes = data.get('nodes') or []
node = data.get('node') or {}
if not nodes and node:
nodes.append(node)
for node in nodes:
story = try_get(node, lambda x: x['comet_sections']['content']['story'], dict) or {}
attachments = try_get(story, [
lambda x: x['attached_story']['attachments'],
lambda x: x['attachments']
], list) or []
for attachment in attachments:
attachment = try_get(attachment, lambda x: x['style_type_renderer']['attachment'], dict)
ns = try_get(attachment, lambda x: x['all_subattachments']['nodes'], list) or []
for n in ns:
parse_attachment(n)
parse_attachment(attachment)
edges = try_get(data, lambda x: x['mediaset']['currMedia']['edges'], list) or []
for edge in edges:
parse_attachment(edge, key='node')
video = data.get('video') or {}
if video:
attachments = try_get(video, [
lambda x: x['story']['attachments'],
lambda x: x['creation_story']['attachments']
], list) or []
for attachment in attachments:
parse_attachment(attachment)
if not entries:
parse_graphql_video(video)
return self.playlist_result(entries, video_id)
if not video_data:
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
'The video is not available, Facebook said: "%s"' % m_msg.group(1),
expected=True)
elif '>You must log in to continue' in webpage:
self.raise_login_required()
if not video_data and '/watchparty/' in url:
post_data = {
'doc_id': 3731964053542869,
'variables': json.dumps({
'livingRoomID': video_id,
}),
}
prefetched_data = extract_relay_prefetched_data(r'"login_data"\s*:\s*{')
if prefetched_data:
lsd = try_get(prefetched_data, lambda x: x['login_data']['lsd'], dict)
if lsd:
post_data[lsd['name']] = lsd['value']
relay_data = extract_relay_data(r'\[\s*"RelayAPIConfigDefaults"\s*,')
for define in (relay_data.get('define') or []):
if define[0] == 'RelayAPIConfigDefaults':
self._api_config = define[2]
living_room = self._download_json(
urljoin(url, self._api_config['graphURI']), video_id,
data=urlencode_postdata(post_data))['data']['living_room']
entries = []
for edge in (try_get(living_room, lambda x: x['recap']['watched_content']['edges']) or []):
video = try_get(edge, lambda x: x['node']['video']) or {}
v_id = video.get('id')
if not v_id:
continue
v_id = compat_str(v_id)
entries.append(self.url_result(
self._VIDEO_PAGE_TEMPLATE % v_id,
self.ie_key(), v_id, video.get('name')))
return self.playlist_result(entries, video_id)
if not video_data:
# Video info not in first request, do a secondary request using
# tahoe player specific URL
tahoe_data = self._download_webpage(
self._VIDEO_PAGE_TAHOE_TEMPLATE % video_id, video_id,
data=urlencode_postdata({
'__a': 1,
'__pc': self._search_regex(
r'pkg_cohort["\']\s*:\s*["\'](.+?)["\']', webpage,
'pkg cohort', default='PHASED:DEFAULT'),
'__rev': self._search_regex(
r'client_revision["\']\s*:\s*(\d+),', webpage,
'client revision', default='3944515'),
'fb_dtsg': self._search_regex(
r'"DTSGInitialData"\s*,\s*\[\]\s*,\s*{\s*"token"\s*:\s*"([^"]+)"',
webpage, 'dtsg token', default=''),
}),
headers={
'Content-Type': 'application/x-www-form-urlencoded',
})
tahoe_js_data = self._parse_json(
self._search_regex(
r'for\s+\(\s*;\s*;\s*\)\s*;(.+)', tahoe_data,
'tahoe js data', default='{}'),
video_id, fatal=False)
video_data = extract_from_jsmods_instances(tahoe_js_data)
if not video_data:
raise ExtractorError('Cannot parse data')
if len(video_data) > 1:
entries = []
for v in video_data:
video_url = v[0].get('video_url')
if not video_url:
continue
entries.append(self.url_result(urljoin(
url, video_url), self.ie_key(), v[0].get('video_id')))
return self.playlist_result(entries, video_id)
video_data = video_data[0]
formats = []
subtitles = {}
for f in video_data:
format_id = f['stream_type']
if f and isinstance(f, dict):
f = [f]
if not f or not isinstance(f, list):
continue
for quality in ('sd', 'hd'):
for src_type in ('src', 'src_no_ratelimit'):
src = f[0].get('%s_%s' % (quality, src_type))
if src:
preference = -10 if format_id == 'progressive' else 0
if quality == 'hd':
preference += 5
formats.append({
'format_id': '%s_%s_%s' % (format_id, quality, src_type),
'url': src,
'quality': preference,
})
extract_dash_manifest(f[0], formats)
subtitles_src = f[0].get('subtitles_src')
if subtitles_src:
subtitles.setdefault('en', []).append({'url': subtitles_src})
if not formats:
raise ExtractorError('Cannot find video formats')
process_formats(formats)
video_title = self._html_search_regex(
r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage,
'title', default=None)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', default=None)
if not video_title:
video_title = self._html_search_meta(
'description', webpage, 'title', default=None)
if video_title:
video_title = limit_length(video_title, 80)
else:
            video_title = 'Facebook video #%s' % video_id
uploader = clean_html(get_element_by_id(
'fbPhotoPageAuthorName', webpage)) or self._search_regex(
r'ownerName\s*:\s*"([^"]+)"', webpage, 'uploader',
default=None) or self._og_search_title(webpage, fatal=False)
timestamp = int_or_none(self._search_regex(
r'<abbr[^>]+data-utime=["\'](\d+)', webpage,
'timestamp', default=None))
thumbnail = self._html_search_meta(['og:image', 'twitter:image'], webpage)
view_count = parse_count(self._search_regex(
r'\bviewCount\s*:\s*["\']([\d,.]+)', webpage, 'view count',
default=None))
info_dict = {
'id': video_id,
'title': video_title,
'formats': formats,
'uploader': uploader,
'timestamp': timestamp,
'thumbnail': thumbnail,
'view_count': view_count,
'subtitles': subtitles,
}
return info_dict
def _real_extract(self, url):
video_id = self._match_id(url)
real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
return self._extract_from_url(real_url, video_id)
class FacebookPluginsVideoIE(InfoExtractor):
_VALID_URL = r'https?://(?:[\w-]+\.)?facebook\.com/plugins/video\.php\?.*?\bhref=(?P<id>https.+)'
_TESTS = [{
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fgov.sg%2Fvideos%2F10154383743583686%2F&show_text=0&width=560',
'md5': '5954e92cdfe51fe5782ae9bda7058a07',
'info_dict': {
'id': '10154383743583686',
'ext': 'mp4',
'title': 'What to do during the haze?',
'uploader': 'Gov.sg',
'upload_date': '20160826',
'timestamp': 1472184808,
},
'add_ie': [FacebookIE.ie_key()],
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fvideo.php%3Fv%3D10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https://www.facebook.com/gov.sg/videos/10154383743583686/&show_text=0&width=560',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
compat_urllib_parse_unquote(self._match_id(url)),
FacebookIE.ie_key())
| true
| true
|
7906e91271c9ceae69038fae01ff08051a3e6531
| 31779
|
py
|
Python
|
app.py
|
krishnaaxo/Finance-Forcasting-Dashboard
|
6386247b7e661fb0804b80d4c77dd5dcd94a7e87
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
krishnaaxo/Finance-Forcasting-Dashboard
|
6386247b7e661fb0804b80d4c77dd5dcd94a7e87
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
krishnaaxo/Finance-Forcasting-Dashboard
|
6386247b7e661fb0804b80d4c77dd5dcd94a7e87
|
[
"Apache-2.0"
] | 1
|
2021-08-10T05:02:10.000Z
|
2021-08-10T05:02:10.000Z
|
import pandas as pd
import tweepy
from textblob import TextBlob
from wordcloud import WordCloud
import plotly.graph_objs as go
import os
import re
import pystan
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from GoogleNews import GoogleNews
from ta.volatility import BollingerBands
from ta.trend import MACD
from ta.momentum import RSIIndicator
import datetime as datetime
import base64
import pandas as pd
import plotly.express as px
import datetime
import requests
from bs4 import BeautifulSoup
from datetime import date
from plotly import graph_objs
st.set_page_config(
layout="wide",
initial_sidebar_state="auto",
page_title= "Finance-Forcasting-Dashboard",
page_icon= "Images/growth.png",
)
col1, col2, col3 = st.beta_columns([1,2,1])
col1.write("")
col2.image("Images/LL.png", width = 500)
col3.write("")
st.set_option('deprecation.showPyplotGlobalUse', False)
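# Inject CSS so the whole report container uses a custom background image (base64-encoded).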
main_bg = "Images/BACK.png"
main_bg_ext = "Images/BACK.png"
st.markdown(
f"""
<style>
.reportview-container {{
background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()})
}}
</style>
""",
unsafe_allow_html=True
)
###############################Functions############################
# load data from yahoo finance
def load_data(ticker):
start = "2020-01-01"
today = date.today().strftime("%Y-%m-%d")
data = yf.download(ticker, start, today)
data.reset_index(inplace=True)
return data
# Plot raw data
def plot_raw_data():
fig = graph_objs.Figure()
fig.add_trace(graph_objs.Scatter(x=data['Date'], y=data['Open'], name="stock_open"))
fig.add_trace(graph_objs.Scatter(x=data['Date'], y=data['Close'], name="stock_close"))
fig.layout.update(title_text='Time Series data with Rangeslider', xaxis_rangeslider_visible=True)
st.plotly_chart(fig)
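# Fit a Prophet model on the prepared data and forecast the next 7 days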
def get_forecast(data):
model = Prophet()
model.fit(data)
future = model.make_future_dataframe(periods=7)
forecast = model.predict(future)
return model, forecast
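# Load the historical USD exchange-rate table (one column per currency) from GitHub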
@st.cache
def read_data():
url = "https://raw.githubusercontent.com/emrecanaltinsoy/forex_data/main/forex_usd_data.csv"
data = pd.read_csv(url)
cols = data.columns
return data, cols[1:]
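# Restrict the dataframe to the rows between the selected start and end dates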
@st.cache
def get_range(data, date_range):
start_index = data.index[data["date(y-m-d)"] == str(date_range[0])].tolist()[0]
end_index = data.index[data["date(y-m-d)"] == str(date_range[1])].tolist()[0]
data = data.iloc[start_index : end_index + 1]
cols = data.columns
dates = data["date(y-m-d)"]
return data, dates
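# Scrape today's USD exchange rates from x-rates.com into a single-row dataframe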
@st.cache
def scrape_currency():
today = datetime.date.today()
base_url = "https://www.x-rates.com/historical/?from=USD&amount=1&date"
year = today.year
month = today.month if today.month > 9 else f"0{today.month}"
day = today.day if today.day > 9 else f"0{today.day}"
URL = f"{base_url}={year}-{month}-{day}"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
table = soup.find_all("tr")[12:]
currencies = [table[i].text.split("\n")[1:3][0] for i in range(len(table))]
currencies.insert(0, "date(y-m-d)")
currencies.insert(1, "American Dollar")
rates = [table[i].text.split("\n")[1:3][1] for i in range(len(table))]
rates.insert(0, f"{year}-{month}-{day}")
rates.insert(1, "1")
curr_data = {currencies[i]: rates[i] for i in range(len(rates))}
curr_data = pd.DataFrame(curr_data, index=[0])
cols = curr_data.columns
return curr_data, cols[1:]
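# Train a Prophet model on the last two years of the selected currency and
# forecast the requested number of days ahead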
@st.cache
def train_model(data, currency, period):
df_train = data[["date(y-m-d)", currency]]
df_train = df_train.iloc[-365*2 :]
df_train = df_train.rename(columns={"date(y-m-d)": "ds", currency: "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
return forecast, m
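# Load the full historical dataset once; it is reused by several menu options below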
df_all, columns = read_data()
################################################################################
st.sidebar.image("Images/Menu.png", width = 330)
menu = ["Home","STOCKS Live Forecasting", "Crypto-Live Forecasting","View Historical Currency Charts", "Check Live Currency Exchange rates", "Forecast Currency Live Prices"]
choice = st.sidebar.selectbox("Menu", menu)
if choice == "Home":
st.write("")
st.write(""" <p style=" font-size: 15px; font-weight:normal; font-family:verdana"> Finance Dashboard is a special web service that allows you to view Cryptocurrencies,Stocks,and Live Currency Values by many useful methods (technical indicators, graphical patterns, sentimental analysis, and more). Trading and crypto investing requires constant analysis and monitoring. Traders need to track all their trades in order to improve results and find errors. If you don't use additional instruments, then trading will be unsystematic, and the results will be uncertain. Such a service will be useful and even extremely necessary for those who trade and invest in cryptocurrencies and Stocks. Competent selection of cryptocurrencies is at least half of investment success. Finance Dashboard has a simple interface and is great for quick analysis of the Stock market. </p>
""", unsafe_allow_html=True)
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write(""" <p style=" color:#E75480; font-size: 30px; font-weight:bold"> How does it work? </p>
""", unsafe_allow_html=True)
st.write("")
st.image("Images/How.png", width = 1300)
st.sidebar.write(" ")
st.sidebar.write(" ")
st.sidebar.image("Images/info.png", width = 300)
elif choice == "STOCKS Live Forcasting":
st.title('Stocks Weekly Forecast')
st.subheader('Enter the stock ticker:')
ticker = st.text_input('example: GOOG')
    ticker = ticker.upper()
if len(ticker)>0:
data_load_state = st.text('Loading data...')
data = load_data(ticker)
if data.empty:
data_load_state.text(f'No ticker named {ticker}')
ticker = ''
else:
data_load_state.text('Loading data... done!')
st.subheader(f'Company: {yf.Ticker(ticker).info["longName"]}')
st.write(data.head())
plot_raw_data()
# prepare data for forecasting
df_train = data[['Date','Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
# train and forecast
model, forecast = get_forecast(df_train)
st.subheader('Forecast')
# plot forecast
st.write(f'Forecast plot for the next week')
fig = plot_plotly(model, forecast)
st.plotly_chart(fig)
elif choice == "View Historical Currency Charts":
st.write("This app can be used to view historical **currency** charts!")
date_range = st.date_input(
"Choose date range",
value=(
datetime.date(2011, 1, 1),
datetime.date(2011, 1, 1) + datetime.timedelta(df_all.shape[0] - 1),
),
min_value=datetime.date(2011, 1, 1),
max_value=datetime.date(2011, 1, 1) + datetime.timedelta(df_all.shape[0] - 1),
)
df, dates = get_range(df_all, date_range)
selected_curr = st.multiselect("Select currencies", columns)
ok = st.button("View")
if ok:
if selected_curr:
# st.write(df[selected_curr])
for curr in selected_curr:
fig = px.line(
x=dates,
y=df[curr],
)
fig.update_layout(
xaxis_title="Date",
yaxis_title=curr,
)
st.write(fig)
elif choice == "Check Live Currency Exchange rates":
st.write("This app can be used to check current **currency** data!")
daily_df, columns = scrape_currency()
base_curr = st.selectbox("Select the base currency", columns)
selected_curr = st.multiselect("Select currencies", columns)
if selected_curr:
base = daily_df[base_curr].astype(float)
selected = daily_df[selected_curr].astype(float)
converted = selected / float(base)
st.write(converted)
elif choice == "Forecast Currency Live Prices":
currency = st.selectbox("Select the currency for prediction", columns)
n_weeks = st.slider("Weeks of prediction", 4, 20, 8, 1)
ok = st.button("Predict")
if ok:
train_state = st.text("Training the model...")
pred, model = train_model(df_all, currency, period=n_weeks * 7)
train_state.text("Model training completed!!")
st.subheader("Forecast data")
fig1 = plot_plotly(model, pred)
st.plotly_chart(fig1)
elif choice == "Crypto-Live Forcasting":
st.sidebar.header("Please select cryptocurrency")
option = st.sidebar.selectbox("Ticker Symbol",("BTC-USD", "ETH-USD", "XRP-USD", "DOGE-USD", "ADA-USD", "BNB-USD", "LTC-USD",))
today = datetime.date.today()
before = today - datetime.timedelta(days=1400)
start_date = st.sidebar.date_input('Start date', before)
end_date = st.sidebar.date_input('End date', today)
if start_date < end_date:
st.sidebar.success("Start date: `%s`\n\nEnd date: `%s` " % (start_date, end_date))
else:
st.sidebar.error("Error: End date must fall after start date.")
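    # Download OHLCV history for the selected ticker from Yahoo Finance (cached by Streamlit)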
@st.cache(allow_output_mutation = True)
def get_data(option, start_date, end_date):
df = yf.download(option,start= start_date,end = end_date, progress=False)
return df
# Getting API_KEYS
api_key = os.environ.get("Key")
api_secret = os.environ.get("Secret")
# Function for getting tweets
# Create authentication
@st.cache(allow_output_mutation = True)
def get_tweets(key, secret, search_term):
authentication = tweepy.OAuthHandler(api_key, api_secret)
api = tweepy.API(authentication)
        term = search_term + " -filter:retweets"
# Create a cursor object
tweets = tweepy.Cursor(api.search, q = term, lang = "en",
since = today, tweet_mode = "extended").items(100)
# Store the tweets
tweets_text = [tweet.full_text for tweet in tweets]
df = pd.DataFrame(tweets_text, columns = ["Tweets"])
return df
# Clean text
@st.cache(allow_output_mutation = True)
def Clean(twt):
twt = re.sub("#cryptocurrency", "cryptocurrency", twt)
twt = re.sub("#Cryptocurrency", "Cryptocurrency", twt)
twt = re.sub("#[A-Za-z0-9]+", "", twt)
twt = re.sub("RT[\s]+", "", twt)
twt = re.sub("\\n", "", twt)
twt = re.sub("https?\://\S+", '', twt)
twt = re.sub("<br />", "", twt)
twt = re.sub("\d","", twt)
twt = re.sub("it\'s", "it is", twt)
twt = re.sub("can\'t", "cannot", twt)
twt = re.sub("<(?:a\b[^>]*>|/a>)", "", twt)
return twt
# Subjectivity and Polarity
@st.cache(allow_output_mutation = True)
def subjectivity(text):
return TextBlob(text).sentiment.subjectivity
@st.cache(allow_output_mutation = True)
def polarity(text):
return TextBlob(text).sentiment.polarity
# Create a function to get sentiment text
@st.cache(allow_output_mutation = True)
def sentiment(score):
if score < 0:
return "Negative"
elif score == 0:
return "Neutral"
else:
return "Positive"
if option == "BTC-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
        # Relative Strength Index (RSI)
        rsi = RSIIndicator(df["Close"]).rsi()
        st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index (RSI) </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> BTC-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
#Plot
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Bitcoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "ETH-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
        # Relative Strength Index (RSI)
        rsi = RSIIndicator(df["Close"]).rsi()
        st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index (RSI) </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> ETH-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Etherium")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "DOGE-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
        # Relative Strength Index (RSI)
        rsi = RSIIndicator(df["Close"]).rsi()
        st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index (RSI) </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> DOGE-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Dogecoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
st.write(" ")
elif option == "XRP-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
        # Relative Strength Index (RSI)
        rsi = RSIIndicator(df["Close"]).rsi()
        st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index (RSI) </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> DOGE-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("XRP")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "ADA-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
        # Relative Strength Index (RSI)
        rsi = RSIIndicator(df["Close"]).rsi()
        st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index (RSI) </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> ADA-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("cryptocurrency")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "BNB-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
        # Relative Strength Index (RSI)
        rsi = RSIIndicator(df["Close"]).rsi()
        st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index (RSI) </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> BNB-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("BNB")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "LTC-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Relative Strength Index (RSI)
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> LTC-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Litecoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
# Sentiment Analysis
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> How generally users feel about cryptocurrency? </p>
""", unsafe_allow_html=True)
st.write(" ")
df = get_tweets(api_key, api_secret, "#cryptocurrency")
df["Tweets"] = df["Tweets"].apply(Clean)
df["Subjectivity"] = df["Tweets"].apply(subjectivity)
df["Polarity"] = df["Tweets"].apply(polarity)
#WordCloud
words = " ".join([twts for twts in df["Tweets"]])
cloud = WordCloud(random_state = 21, max_font_size = 100).generate(words)
plt.imshow(cloud, interpolation = "bilinear")
plt.axis("off")
st.pyplot()
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Sentiment Bar Plot </p>
""", unsafe_allow_html=True)
st.write(" ")
# Get Sentiment tweets
df["Sentiment"] = df["Polarity"].apply(sentiment)
df["Sentiment"].value_counts().plot(kind = "bar", figsize = (10,5))
plt.title("Sentiment Analysis Bar Plot")
plt.xlabel("Sentiment")
plt.ylabel("Number of Tweets")
st.pyplot()
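# A minimal sketch of the Prophet pattern repeated in every ticker branch above,
# factored into one helper. The helper name and structure are assumptions for
# illustration, not part of the original dashboard file.
from fbprophet import Prophet

def prophet_forecast(price_df, period):
    # price_df is a yfinance frame as used above; Prophet expects columns named ds/y
    train = price_df.reset_index()[["Date", "Close"]].rename(columns={"Date": "ds", "Close": "y"})
    model = Prophet()
    model.fit(train)
    future = model.make_future_dataframe(periods=period)
    return model, model.predict(future)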
| 27.208048
| 870
| 0.6082
|
import pandas as pd
import tweepy
from textblob import TextBlob
from wordcloud import WordCloud
import plotly.graph_objs as go
import os
import re
import pystan
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from GoogleNews import GoogleNews
from ta.volatility import BollingerBands
from ta.trend import MACD
from ta.momentum import RSIIndicator
import datetime as datetime
import base64
import pandas as pd
import plotly.express as px
import datetime
import requests
from bs4 import BeautifulSoup
from datetime import date
from plotly import graph_objs
st.set_page_config(
layout="wide",
initial_sidebar_state="auto",
page_title= "Finance-Forcasting-Dashboard",
page_icon= "Images/growth.png",
)
col1, col2, col3 = st.beta_columns([1,2,1])
col1.write("")
col2.image("Images/LL.png", width = 500)
col3.write("")
st.set_option('deprecation.showPyplotGlobalUse', False)
main_bg = "Images/BACK.png"
main_bg_ext = "Images/BACK.png"
st.markdown(
f"""
<style>
.reportview-container {{
background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()})
}}
</style>
""",
unsafe_allow_html=True
)
:3][1] for i in range(len(table))]
rates.insert(0, f"{year}-{month}-{day}")
rates.insert(1, "1")
curr_data = {currencies[i]: rates[i] for i in range(len(rates))}
curr_data = pd.DataFrame(curr_data, index=[0])
cols = curr_data.columns
return curr_data, cols[1:]
@st.cache
def train_model(data, currency, period):
df_train = data[["date(y-m-d)", currency]]
df_train = df_train.iloc[-365*2 :]
df_train = df_train.rename(columns={"date(y-m-d)": "ds", currency: "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
return forecast, m
df_all, columns = read_data()
ltiselect("Select currencies", columns)
ok = st.button("View")
if ok:
if selected_curr:
# st.write(df[selected_curr])
for curr in selected_curr:
fig = px.line(
x=dates,
y=df[curr],
)
fig.update_layout(
xaxis_title="Date",
yaxis_title=curr,
)
st.write(fig)
elif choice == "Check Live Currency Exchange rates":
st.write("This app can be used to check current **currency** data!")
daily_df, columns = scrape_currency()
base_curr = st.selectbox("Select the base currency", columns)
selected_curr = st.multiselect("Select currencies", columns)
if selected_curr:
base = daily_df[base_curr].astype(float)
selected = daily_df[selected_curr].astype(float)
converted = selected / float(base)
st.write(converted)
elif choice == "Forecast Currency Live Prices":
currency = st.selectbox("Select the currency for prediction", columns)
n_weeks = st.slider("Weeks of prediction", 4, 20, 8, 1)
ok = st.button("Predict")
if ok:
train_state = st.text("Training the model...")
pred, model = train_model(df_all, currency, period=n_weeks * 7)
train_state.text("Model training completed!!")
st.subheader("Forecast data")
fig1 = plot_plotly(model, pred)
st.plotly_chart(fig1)
elif choice == "Crypto-Live Forcasting":
st.sidebar.header("Please select cryptocurrency")
option = st.sidebar.selectbox("Ticker Symbol",("BTC-USD", "ETH-USD", "XRP-USD", "DOGE-USD", "ADA-USD", "BNB-USD", "LTC-USD",))
today = datetime.date.today()
before = today - datetime.timedelta(days=1400)
start_date = st.sidebar.date_input('Start date', before)
end_date = st.sidebar.date_input('End date', today)
if start_date < end_date:
st.sidebar.success("Start date: `%s`\n\nEnd date: `%s` " % (start_date, end_date))
else:
st.sidebar.error("Error: End date must fall after start date.")
@st.cache(allow_output_mutation = True)
def get_data(option, start_date, end_date):
df = yf.download(option,start= start_date,end = end_date, progress=False)
return df
# Getting API_KEYS
api_key = os.environ.get("Key")
api_secret = os.environ.get("Secret")
# Function for getting tweets
# Create authentication
@st.cache(allow_output_mutation = True)
def get_tweets(key, secret, search_term):
authentication = tweepy.OAuthHandler(api_key, api_secret)
api = tweepy.API(authentication)
term = search_term+"-filter:retweets"
# Create a cursor object
tweets = tweepy.Cursor(api.search, q = term, lang = "en",
since = today, tweet_mode = "extended").items(100)
# Store the tweets
tweets_text = [tweet.full_text for tweet in tweets]
df = pd.DataFrame(tweets_text, columns = ["Tweets"])
return df
# Clean text
@st.cache(allow_output_mutation = True)
def Clean(twt):
twt = re.sub("#cryptocurrency", "cryptocurrency", twt)
twt = re.sub("#Cryptocurrency", "Cryptocurrency", twt)
twt = re.sub("#[A-Za-z0-9]+", "", twt)
twt = re.sub("RT[\s]+", "", twt)
twt = re.sub("\\n", "", twt)
twt = re.sub("https?\://\S+", '', twt)
twt = re.sub("<br />", "", twt)
twt = re.sub("\d","", twt)
twt = re.sub("it\'s", "it is", twt)
twt = re.sub("can\'t", "cannot", twt)
twt = re.sub("<(?:a\b[^>]*>|/a>)", "", twt)
return twt
# Subjectivity and Polarity
@st.cache(allow_output_mutation = True)
def subjectivity(text):
return TextBlob(text).sentiment.subjectivity
@st.cache(allow_output_mutation = True)
def polarity(text):
return TextBlob(text).sentiment.polarity
# Create a function to get sentiment text
@st.cache(allow_output_mutation = True)
def sentiment(score):
if score < 0:
return "Negative"
elif score == 0:
return "Neutral"
else:
return "Positive"
if option == "BTC-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Relative Strength Index (RSI)
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> BTC-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
#Plot
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Bitcoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "ETH-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Relative Strength Index (RSI)
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> ETH-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Etherium")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "DOGE-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Relative Strength Index (RSI)
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> DOGE-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Dogecoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
st.write(" ")
elif option == "XRP-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Relative Strength Index (RSI)
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> DOGE-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("XRP")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "ADA-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Relative Strength Index (RSI)
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> ADA-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("cryptocurrency")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "BNB-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Relative Strength Index (RSI)
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> BNB-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("BNB")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "LTC-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Relative Strength Index (RSI)
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Relative Strength Index </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> LTC-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Litecoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
# Sentiment Analysis
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> How generally users feel about cryptocurrency? </p>
""", unsafe_allow_html=True)
st.write(" ")
df = get_tweets(api_key, api_secret, "#cryptocurrency")
df["Tweets"] = df["Tweets"].apply(Clean)
df["Subjectivity"] = df["Tweets"].apply(subjectivity)
df["Polarity"] = df["Tweets"].apply(polarity)
#WordCloud
words = " ".join([twts for twts in df["Tweets"]])
cloud = WordCloud(random_state = 21, max_font_size = 100).generate(words)
plt.imshow(cloud, interpolation = "bilinear")
plt.axis("off")
st.pyplot()
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Sentiment Bar Plot </p>
""", unsafe_allow_html=True)
st.write(" ")
# Get Sentiment tweets
df["Sentiment"] = df["Polarity"].apply(sentiment)
df["Sentiment"].value_counts().plot(kind = "bar", figsize = (10,5))
plt.title("Sentiment Analysis Bar Plot")
plt.xlabel("Sentiment")
plt.ylabel("Number of Tweets")
st.pyplot()
| true
| true
|
7906ea494164766ff99587dacd54ddcc796a6d85
| 2,041
|
py
|
Python
|
graphite_beacon/handlers/pagerduty.py
|
z1nkum/graphite-beacon
|
d1fd4c34db76ac36f27e39d00a348a5dcaf51c31
|
[
"MIT"
] | null | null | null |
graphite_beacon/handlers/pagerduty.py
|
z1nkum/graphite-beacon
|
d1fd4c34db76ac36f27e39d00a348a5dcaf51c31
|
[
"MIT"
] | null | null | null |
graphite_beacon/handlers/pagerduty.py
|
z1nkum/graphite-beacon
|
d1fd4c34db76ac36f27e39d00a348a5dcaf51c31
|
[
"MIT"
] | null | null | null |
import json
import hashlib
from tornado import httpclient as hc
from tornado import gen
from graphite_beacon.handlers import LOGGER, AbstractHandler
class PagerdutyHandler(AbstractHandler):
name = 'pagerduty'
# Default options
defaults = {
'subdomain': None,
'apitoken': None,
'service_key': None
}
def init_handler(self):
self.subdomain = self.options.get('subdomain')
assert self.subdomain, 'subdomain is not defined'
self.apitoken = self.options.get('apitoken')
assert self.apitoken, 'apitoken is not defined'
self.service_key = self.options.get('service_key')
assert self.service_key, 'service_key is not defined'
self.client = hc.AsyncHTTPClient()
@gen.coroutine
def notify(self, level, alert, value, target=None, ntype=None, rule=None):
LOGGER.debug("Handler (%s) %s", self.name, level)
message = self.get_short(level, alert, value, target=target, ntype=ntype, rule=rule)
LOGGER.debug('message1:%s', message)
if level == 'normal':
event_type = 'resolve'
else:
event_type = 'trigger'
headers = {
"Content-type": "application/json",
}
client_url = None
if target:
client_url = alert.get_graph_url(target)
m = hashlib.md5()
incident_key_str = "alert={},client_url={}".format(alert.name, client_url)
m.update(incident_key_str.encode("utf-8"))
incident_key = m.hexdigest()
data = {
"service_key": self.service_key,
"event_type": event_type,
"description": message,
"details": message,
"incident_key": incident_key,
"client": 'graphite-beacon',
"client_url": client_url
}
yield self.client.fetch(
"https://events.pagerduty.com/generic/2010-04-15/create_event.json",
body=json.dumps(data),
headers=headers,
method='POST'
)
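# A small standalone sketch of how the deduplication key above is formed:
# PagerDuty collapses events that share an incident_key, and here that key is
# simply an MD5 of the alert name and graph URL (both values below are made up).
import hashlib
incident_key = hashlib.md5(
    "alert={},client_url={}".format("cpu_alert", "http://graphite/render?target=cpu").encode("utf-8")
).hexdigest()
print(incident_key)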
| 30.462687
| 92
| 0.598726
|
import json
import hashlib
from tornado import httpclient as hc
from tornado import gen
from graphite_beacon.handlers import LOGGER, AbstractHandler
class PagerdutyHandler(AbstractHandler):
name = 'pagerduty'
defaults = {
'subdomain': None,
'apitoken': None,
'service_key': None
}
def init_handler(self):
self.subdomain = self.options.get('subdomain')
assert self.subdomain, 'subdomain is not defined'
self.apitoken = self.options.get('apitoken')
assert self.apitoken, 'apitoken is not defined'
self.service_key = self.options.get('service_key')
assert self.service_key, 'service_key is not defined'
self.client = hc.AsyncHTTPClient()
@gen.coroutine
def notify(self, level, alert, value, target=None, ntype=None, rule=None):
LOGGER.debug("Handler (%s) %s", self.name, level)
message = self.get_short(level, alert, value, target=target, ntype=ntype, rule=rule)
LOGGER.debug('message1:%s', message)
if level == 'normal':
event_type = 'resolve'
else:
event_type = 'trigger'
headers = {
"Content-type": "application/json",
}
client_url = None
if target:
client_url = alert.get_graph_url(target)
m = hashlib.md5()
incident_key_str = "alert={},client_url={}".format(alert.name, client_url)
m.update(incident_key_str.encode("utf-8"))
incident_key = m.hexdigest()
data = {
"service_key": self.service_key,
"event_type": event_type,
"description": message,
"details": message,
"incident_key": incident_key,
"client": 'graphite-beacon',
"client_url": client_url
}
yield self.client.fetch(
"https://events.pagerduty.com/generic/2010-04-15/create_event.json",
body=json.dumps(data),
headers=headers,
method='POST'
)
| true
| true
|
7906ea566fb2e5dd7c123a133028b83553dc8cf5
| 3,242
|
py
|
Python
|
pogweb/models.py
|
ahnaf-zamil/pogweb
|
14ba9bde39f100dc1e7b0fbf6aa959551a8d74d1
|
[
"MIT"
] | 3
|
2021-01-25T17:03:29.000Z
|
2021-05-21T15:34:55.000Z
|
pogweb/models.py
|
ahnaf-zamil/pogweb
|
14ba9bde39f100dc1e7b0fbf6aa959551a8d74d1
|
[
"MIT"
] | null | null | null |
pogweb/models.py
|
ahnaf-zamil/pogweb
|
14ba9bde39f100dc1e7b0fbf6aa959551a8d74d1
|
[
"MIT"
] | null | null | null |
"""
Copyright 2021 K.M Ahnaf Zamil
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from urllib.parse import parse_qs
import typing
__all__: typing.Final = ["Request", "ImmutableDict", "_Redirect", "Endpoint"]
class _Redirect(object):
"""Just an object for simulating a redirect"""
def __init__(self, url: str) -> None:
self.url = url
class ImmutableDict(dict):
"""An immutable dictionary implementation for query arguments and form data"""
def __setitem__(self, k, v) -> None:
raise ValueError("ImmutableDict object cannot be modified (immutable)")
class Request(object):
"""An object that contains information related to the HTTP request"""
def __init__(self, environ):
self._environ = environ
@property
def method(self) -> str:
"""HTTP method used for the request"""
return self._environ["REQUEST_METHOD"]
@property
def endpoint(self) -> str:
"""The route/endpoint used for that specific request"""
return self._environ["PATH_INFO"]
@property
def query_args(self) -> ImmutableDict:
"""Query arguments from the request"""
args = self._environ["QUERY_STRING"]
if not args:
return ImmutableDict({})
args = args.split("&")
query_args = {}
for _arg in args:
name, value = _arg.split("=")
query_args[name] = value
return ImmutableDict(query_args)
@property
def form(self) -> typing.Optional[typing.Dict]:
"""Form data sent via HTTP request"""
data = self._environ.get("wsgi.input") # Returns io.BytesIO object
if data:
form_dict = parse_qs(data.getvalue().decode("utf-8"))
final_dict = {}
for k, v in form_dict.items():
final_dict[k] = v[0]  # since v is a list containing the form data values
return ImmutableDict(final_dict)
def __str__(self):
return f'<Request endpoint="{self.endpoint}" method="{self.method}">'
class Endpoint(object):
def __init__(self, route, func) -> None:
self.route = route
self.extension = None
self._func = func
def __call__(self, request: Request):
return self._func(request)
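# A minimal usage sketch for the classes above (the environ values are invented
# for illustration; a real WSGI server would supply them):
environ = {
    "REQUEST_METHOD": "GET",
    "PATH_INFO": "/hello",
    "QUERY_STRING": "name=pog&lang=en",
}
req = Request(environ)
assert req.method == "GET"
assert req.endpoint == "/hello"
assert req.query_args["name"] == "pog"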
| 36.426966
| 460
| 0.677051
|
from urllib.parse import parse_qs
import typing
__all__: typing.Final = ["Request", "ImmutableDict", "_Redirect", "Endpoint"]
class _Redirect(object):
def __init__(self, url: str) -> None:
self.url = url
class ImmutableDict(dict):
def __setitem__(self, k, v) -> None:
raise ValueError("ImmutableDict object cannot be modified (immutable)")
class Request(object):
def __init__(self, environ):
self._environ = environ
@property
def method(self) -> str:
return self._environ["REQUEST_METHOD"]
@property
def endpoint(self) -> str:
return self._environ["PATH_INFO"]
@property
def query_args(self) -> ImmutableDict:
args = self._environ["QUERY_STRING"]
if not args:
return ImmutableDict({})
args = args.split("&")
query_args = {}
for _arg in args:
name, value = _arg.split("=")
query_args[name] = value
return ImmutableDict(query_args)
@property
def form(self) -> typing.Optional[typing.Dict]:
data = self._environ.get("wsgi.input")
if data:
form_dict = parse_qs(data.getvalue().decode("utf-8"))
final_dict = {}
for k, v in form_dict.items():
final_dict[k] = v[0]
return ImmutableDict(final_dict)
def __str__(self):
return f'<Request endpoint="{self.endpoint}" method="{self.method}">'
class Endpoint(object):
def __init__(self, route, func) -> None:
self.route = route
self.extension = None
self._func = func
def __call__(self, request: Request):
return self._func(request)
| true
| true
|
7906ea82114b521989075e361ad79c0e393d521b
| 4,466
|
py
|
Python
|
vsmlib/embeddings/bofang/annotate_corpus_nltk.py
|
berntham/vsmlib
|
b2ed762ff50b5dcdd6999ad75c205557e70c6598
|
[
"Apache-2.0"
] | 16
|
2017-01-04T05:18:42.000Z
|
2021-08-08T09:31:08.000Z
|
vsmlib/embeddings/bofang/annotate_corpus_nltk.py
|
berntham/vsmlib
|
b2ed762ff50b5dcdd6999ad75c205557e70c6598
|
[
"Apache-2.0"
] | 8
|
2017-07-01T04:23:53.000Z
|
2019-01-04T04:03:45.000Z
|
vsmlib/embeddings/bofang/annotate_corpus_nltk.py
|
berntham/vsmlib
|
b2ed762ff50b5dcdd6999ad75c205557e70c6598
|
[
"Apache-2.0"
] | 2
|
2017-10-31T02:21:08.000Z
|
2021-01-07T00:03:23.000Z
|
#!/usr/bin/env python
"""
convert corpus to annotated corpus
This script uses nltk for dependency parsing, which is based on stanford corenlp.
"""
import os
from nltk.parse.stanford import *
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('corenlp_path',
help='Directory to stanford corenlp') # /home/lbf/Documents/stanford-corenlp-full-2017-06-09/
parser.add_argument('--max_block_size', '-mbs', default=1000000, type=int,
help='indicate how many characters the parser handles at one time; a bigger max_block_size will consume more memory, but should be faster.')
parser.add_argument('--corpus_path', default='./news.toy.txt',
help='Directory to corpus')
parser.add_argument('--annotated_corpus_path', default='./news.toy.annotated.txt',
help='Directory to annotated corpus')
parser.add_argument('--parser_model', '-o', choices=['edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz', 'edu/stanford/nlp/models/parser/nndep/english_UD.gz'],
default='edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz',
help='stanford parser model')
args = parser.parse_args()
class dependency_parser():
def __init__(self, path_to_jar, path_to_models_jar, model_path):
if 'nndep/' in model_path:
self.parser = StanfordNeuralDependencyParser( #StanfordNeuralDependencyParser
path_to_jar=path_to_jar,
path_to_models_jar=path_to_models_jar,
model_path=model_path, java_options='-mx5g') # , corenlp_options='-model modelOutputFile.txt.gz'
if 'lexparser/' in model_path:
self.parser = StanfordDependencyParser(
path_to_jar=path_to_jar,
path_to_models_jar=path_to_models_jar,
model_path=model_path, java_options='-mx10g')
def preprocess_text(self, text):
# hack for nltk
text = text.replace('/', '-')
# hack for output format
text = text.replace('{', '-')
text = text.replace('}', '-')
text = text.replace('[', '-')
text = text.replace(']', '-')
return text
def parse(self, text):
text = self.preprocess_text(text)
out = ''
# print(text)
try:
parse_results = self.parser.raw_parse(text) #, properties={'annotators' : 'depparse'}
for dependency_tree in parse_results:
for index, node in dependency_tree.nodes.items():
if node['word'] is None: # skip root node
continue
dependency_str = ''
for dep, index in node['deps'].items():
dependency_str += ',{}/{}'.format(str(index[0] - 1), dep)
dependency_str = dependency_str[1:]
dependency_str = '{}/{}'.format(node['rel'], node['head'])
out += '{}/{}[{}] '.format(node['word'], node['tag'], dependency_str)
out += "\n"
return out
except AssertionError as e:
print('error when parse "{}"'.format(text))
return ''
dependency_parser = dependency_parser(
path_to_jar=os.path.join(args.corenlp_path, "stanford-corenlp-3.8.0.jar"),
path_to_models_jar=os.path.join(args.corenlp_path, "stanford-corenlp-3.8.0-models.jar"),
model_path=args.parser_model)
# edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz
# edu/stanford/nlp/models/parser/nndep/english_UD.gz
start_time = time.time()
print(dependency_parser.parse("Alice's dog also likes eating sausage from Russia"))
# dependency_parser.parse('Information about the stages 50km to 80km), booking for food and accommodation (R450-38 per night) and downloadable maps are on the Freedom Challenge website call 00 27 84 567 4152 ')
block_size = 0
text = ''
with open(args.corpus_path, "r") as corpus_file, open(args.annotated_corpus_path, "w") as annotated_corpus_file:
for line in corpus_file:
text += line + "\n"
block_size += len(line)
if block_size > args.max_block_size:
out = dependency_parser.parse(text)
annotated_corpus_file.write(out)
block_size = 0
text = ''
out = dependency_parser.parse(text)
annotated_corpus_file.write(out)
end_time = time.time()
print('spend {} minutes'.format((end_time - start_time) / 60))
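# The annotated corpus written above stores each token as word/TAG[rel/head].
# A small sketch of reading that format back (the sample token is invented;
# real tags and head indices depend on the Stanford model used):
import re
token_re = re.compile(r"^(?P<word>.+)/(?P<tag>[^/\[]+)\[(?P<rel>[^/\]]+)/(?P<head>\d+)\]$")
m = token_re.match("dog/NN[nsubj/5]")
if m:
    print(m.group("word"), m.group("tag"), m.group("rel"), m.group("head"))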
| 43.784314
| 211
| 0.631438
|
import os
from nltk.parse.stanford import *
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('corenlp_path',
help='Directory to stanford corenlp')
parser.add_argument('--max_block_size', '-mbs', default=1000000, type=int,
help='indicate how many characters the parser handles at one time; a bigger max_block_size will consume more memory, but should be faster.')
parser.add_argument('--corpus_path', default='./news.toy.txt',
help='Directory to corpus')
parser.add_argument('--annotated_corpus_path', default='./news.toy.annotated.txt',
help='Directory to annotated corpus')
parser.add_argument('--parser_model', '-o', choices=['edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz', 'edu/stanford/nlp/models/parser/nndep/english_UD.gz'],
default='edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz',
help='stanford parser model')
args = parser.parse_args()
class dependency_parser():
def __init__(self, path_to_jar, path_to_models_jar, model_path):
if 'nndep/' in model_path:
self.parser = StanfordNeuralDependencyParser(
path_to_jar=path_to_jar,
path_to_models_jar=path_to_models_jar,
model_path=model_path, java_options='-mx5g')
if 'lexparser/' in model_path:
self.parser = StanfordDependencyParser(
path_to_jar=path_to_jar,
path_to_models_jar=path_to_models_jar,
model_path=model_path, java_options='-mx10g')
def preprocess_text(self, text):
text = text.replace('/', '-')
text = text.replace('{', '-')
text = text.replace('}', '-')
text = text.replace('[', '-')
text = text.replace(']', '-')
return text
def parse(self, text):
text = self.preprocess_text(text)
out = ''
try:
parse_results = self.parser.raw_parse(text)
for dependency_tree in parse_results:
for index, node in dependency_tree.nodes.items():
if node['word'] is None:
continue
dependency_str = ''
for dep, index in node['deps'].items():
dependency_str += ',{}/{}'.format(str(index[0] - 1), dep)
dependency_str = dependency_str[1:]
dependency_str = '{}/{}'.format(node['rel'], node['head'])
out += '{}/{}[{}] '.format(node['word'], node['tag'], dependency_str)
out += "\n"
return out
except AssertionError as e:
print('error when parse "{}"'.format(text))
return ''
dependency_parser = dependency_parser(
path_to_jar=os.path.join(args.corenlp_path, "stanford-corenlp-3.8.0.jar"),
path_to_models_jar=os.path.join(args.corenlp_path, "stanford-corenlp-3.8.0-models.jar"),
model_path=args.parser_model)
start_time = time.time()
print(dependency_parser.parse("Alice's dog also likes eating sausage from Russia"))
# dependency_parser.parse('Information about the stages 50km to 80km), booking for food and accommodation (R450-38 per night) and downloadable maps are on the Freedom Challenge website call 00 27 84 567 4152 ')
block_size = 0
text = ''
with open(args.corpus_path, "r") as corpus_file, open(args.annotated_corpus_path, "w") as annotated_corpus_file:
for line in corpus_file:
text += line + "\n"
block_size += len(line)
if block_size > args.max_block_size:
out = dependency_parser.parse(text)
annotated_corpus_file.write(out)
block_size = 0
text = ''
out = dependency_parser.parse(text)
annotated_corpus_file.write(out)
end_time = time.time()
print('spend {} minutes'.format((end_time - start_time) / 60))
| true
| true
|
7906ea9981d11412a7c0511aa1d3f343eb28facd
| 3,243
|
py
|
Python
|
py4syn/epics/DxpFakeClass.py
|
gabrielpreviato/py4syn
|
ac97c220d38e1aa630ff3ba4d9da030a0d3833d8
|
[
"0BSD"
] | 12
|
2015-07-12T17:15:06.000Z
|
2018-04-28T06:51:15.000Z
|
py4syn/epics/DxpFakeClass.py
|
gabrielpreviato/py4syn
|
ac97c220d38e1aa630ff3ba4d9da030a0d3833d8
|
[
"0BSD"
] | 29
|
2016-06-28T12:24:08.000Z
|
2018-10-22T15:59:43.000Z
|
py4syn/epics/DxpFakeClass.py
|
gabrielpreviato/py4syn
|
ac97c220d38e1aa630ff3ba4d9da030a0d3833d8
|
[
"0BSD"
] | 10
|
2015-09-02T17:30:33.000Z
|
2018-01-18T18:52:32.000Z
|
"""Dxp Class
Python Class for EPICS Fake Dxp Control.
:platform: Unix
:synopsis: Python Class for EPICS Spectro control.
.. moduleauthor:: Gabriel Fedel <gabriel.fedel@lnls.br>
.. note:: 11/30/2016 [gabrielfedel] first version released
"""
import os
import numpy as np
import h5py
from py4syn.epics.ImageHDFClass import ImageHDF
NUMPOINTS = 2048
# constants used to parse PV name
CHANNELPOSITION=3
ROIPOSITION=6
class DxpFake(ImageHDF):
# CONSTRUCTOR OF DXP CLASS
def __init__(self, mnemonic, numberOfChannels=4, numberOfRois=32,
pv=None, dxpType="mca", responseTimeout=15, output="out"):
""" Constructor
responseTimeout : how long to wait for the DXP answer
"""
super().__init__(mnemonic, NUMPOINTS, output, dxpType)
self.acquiring = False
self.rois = numberOfRois
def statusChange(self, value, **kw):
"""
Helper callback used to wait for the end of the acquisition.
"""
pass
def setCountTime(self, time):
"""
Method to set the count time of a scaler device.
Parameters
----------
time : `float`
Count time to set to scaler device .
Returns
-------
out : None
"""
pass
def getCountTime(self):
pass
def getRealTime(self):
return np.random.rand()
def setCountStop(self):
pass
def getValueChannel(self, **kwargs):
"""Return intensity
channel is on format mcaC.Rr, where C is the channel and
r is the ROI"""
channel = kwargs['channel']
c = int(channel[CHANNELPOSITION]) - 1
if(len(channel) > ROIPOSITION):
return np.random.rand()
else:
self.saveSpectrum(c, **kwargs)
return 1.0
def saveSpectrum(self, ch, **kwargs):
self.spectrum = np.random.randint(100, size=(2048))
self.ch = ch
super().saveSpectrum()
def isCountRunning(self):
pass
def wait(self):
"""
Blocks until the acquisition completes.
"""
pass
def canMonitor(self):
""" Returns false indcating Dxp cannot be use as a counter monitor"""
return False
def canStopCount(self):
"""
Returns true indicating that Dxp has a stop command.
"""
return True
def getValue(self, **kwargs):
"""
This is a dummy method that always returns zero, which is part of the
:class:`py4syn.epics.ICountable` interface. Dxp does not return
a value while scanning. Instead, it stores an MCA file with the result.
"""
if(kwargs):
return self.getValueChannel(**kwargs)
return self.getValueChannel()
def isCounting(self):
pass
def startCount(self):
pass
def stopCount(self):
pass
def setPresetValue(self, channel, val):
"""Dummy method"""
pass
def close(self):
pass
def startCollectImage(self, rows=0, cols=0):
"""Start to collect an image
When collect an image, the points will be saved on a hdf file"""
super().startCollectImage("int32", rows, cols)
| 24.946154
| 77
| 0.591736
|
import os
import numpy as np
import h5py
from py4syn.epics.ImageHDFClass import ImageHDF
NUMPOINTS = 2048
CHANNELPOSITION=3
ROIPOSITION=6
class DxpFake(ImageHDF):
def __init__(self, mnemonic, numberOfChannels=4, numberOfRois=32,
pv=None, dxpType="mca", responseTimeout=15, output="out"):
super().__init__(mnemonic, NUMPOINTS, output, dxpType)
self.acquiring = False
self.rois = numberOfRois
def statusChange(self, value, **kw):
pass
def setCountTime(self, time):
pass
def getCountTime(self):
pass
def getRealTime(self):
return np.random.rand()
def setCountStop(self):
pass
def getValueChannel(self, **kwargs):
channel = kwargs['channel']
c = int(channel[CHANNELPOSITION]) - 1
if(len(channel) > ROIPOSITION):
return np.random.rand()
else:
self.saveSpectrum(c, **kwargs)
return 1.0
def saveSpectrum(self, ch, **kwargs):
self.spectrum = np.random.randint(100, size=(2048))
self.ch = ch
super().saveSpectrum()
def isCountRunning(self):
pass
def wait(self):
pass
def canMonitor(self):
return False
def canStopCount(self):
return True
def getValue(self, **kwargs):
if(kwargs):
return self.getValueChannel(**kwargs)
return self.getValueChannel()
def isCounting(self):
pass
def startCount(self):
pass
def stopCount(self):
pass
def setPresetValue(self, channel, val):
pass
def close(self):
pass
def startCollectImage(self, rows=0, cols=0):
super().startCollectImage("int32", rows, cols)
| true
| true
|
7906eaad81d7cc2c368c9d7248e4d0d25548bdd2
| 3,561
|
py
|
Python
|
app/src/main/cpp/openvpn3/win/build.py
|
qlcchain/WinQ-Android-code
|
e39f2043ca82c9d61df3819ca9fb3007a7870426
|
[
"MIT"
] | 5
|
2018-07-12T05:50:46.000Z
|
2021-01-11T12:28:24.000Z
|
app/src/main/cpp/openvpn3/win/build.py
|
huzhipeng111/WinQ
|
39925732597fd4822cd554429fab655e8c858c4b
|
[
"MIT"
] | 1
|
2019-07-19T02:40:32.000Z
|
2019-07-19T02:40:32.000Z
|
app/src/main/cpp/openvpn3/win/build.py
|
huzhipeng111/WinQ
|
39925732597fd4822cd554429fab655e8c858c4b
|
[
"MIT"
] | 7
|
2018-07-11T10:37:02.000Z
|
2019-08-03T10:34:08.000Z
|
#!/c/python27/python
import os
from utils import *
def cli_cpp(parms):
return os.path.join(parms['OVPN3'], "core", "test", "ovpncli", "cli.cpp")
def src_fn(parms, srcfile):
# Get source file name
if srcfile:
if '.' not in os.path.basename(srcfile):
srcfile += ".cpp"
else:
srcfile = cli_cpp(parms)
return srcfile
def is_unit_test(argv):
unit_test = False
if len(argv) >= 2:
unit_test = argv[1] == "unittest"
return unit_test
def src_fn_argv(parms, argv):
srcfile = None
if len(argv) >= 1:
srcfile = argv[0]
return src_fn(parms, srcfile)
def build(parms, srcfile, unit_test=False):
# Debug?
if parms['DEBUG']:
dbg_rel_flags = "/Zi"
else:
dbg_rel_flags = "/O2"
# Dictionary we will use to substitute parameters
# onto VC command line.
options = {
"ovpn3" : parms['OVPN3'],
"tap" : os.path.join(parms['TAP'], 'src'),
"tap_component_id" : parms['TAP_WIN_COMPONENT_ID'],
"asio" : os.path.join(build_dir(parms), "asio"),
"mbedtls" : os.path.join(build_dir(parms), "mbedtls"),
"lz4" : os.path.join(build_dir(parms), "lz4", "lib"),
"srcfile" : srcfile,
"extra_defs" : parms['CPP_EXTRA'],
"extra_inc" : "",
"extra_lib_path" : "",
"extra_lib" : "",
}
vc_parms(parms, options)
# Do we need to support XP and Win 2003?
arch = os.environ.get("ARCH", parms['ARCH'])
if arch == "x86_xp":
options['extra_defs'] += " /D_WIN32_WINNT=0x0501" # pre-Vista
else:
options['extra_defs'] += " /D_WIN32_WINNT=0x0600" # Vista and later
options['extra_lib'] += " fwpuclnt.lib"
# Add jsoncpp (optional)
if 'jsoncpp' in parms['LIB_VERSIONS']:
options["jsoncpp"] = os.path.join(build_dir(parms), "jsoncpp")
options['extra_inc'] += " /DHAVE_JSONCPP /I %(jsoncpp)s/dist" % options
options['extra_lib_path'] += " /LIBPATH:%(jsoncpp)s/dist" % options
options['extra_lib'] += " jsoncpp.lib"
if unit_test:
options['extra_lib'] += " gtest.lib"
options['extra_inc'] += " /I %s" % os.path.join(parms["GTEST_ROOT"], "googletest", "include")
options['extra_lib_path'] += " /LIBPATH:%s" % os.path.join(parms["GTEST_ROOT"], "googlemock", "gtest", "Debug")
# Build OpenVPN Connect
if parms.get("CONNECT"):
options['extra_inc'] += " /I " + os.path.join(parms['OVPN3'], "common")
# build it
vc_cmd(parms, r"cl %(extra_defs)s /DNOMINMAX /D_CRT_SECURE_NO_WARNINGS /DUSE_ASIO /DASIO_STANDALONE /DASIO_NO_DEPRECATED /I %(asio)s\asio\include /DUSE_MBEDTLS /I %(mbedtls)s\include /DHAVE_LZ4 /I %(lz4)s%(extra_inc)s -DTAP_WIN_COMPONENT_ID=%(tap_component_id)s /I %(tap)s /I %(ovpn3)s\core /EHsc %(link_static_dynamic_flags)s /W0 %(dbg_rel_flags)s /nologo %(srcfile)s /link /LIBPATH:%(mbedtls)s\library /LIBPATH:%(lz4)s%(extra_lib_path)s mbedtls.lib lz4.lib%(extra_lib)s ws2_32.lib crypt32.lib iphlpapi.lib winmm.lib user32.lib gdi32.lib advapi32.lib wininet.lib shell32.lib ole32.lib rpcrt4.lib" % options, arch=os.environ.get("ARCH"))
if __name__ == "__main__":
import sys
from parms import PARMS
# some parameters might be redefined, like in Jenkins multibranch pipeline case
PARMS['BUILD'] = os.environ.get('BUILD', PARMS['BUILD'])
PARMS['OVPN3'] = os.environ.get('OVPN3', PARMS['OVPN3'])
src = src_fn_argv(PARMS, sys.argv[1:])
unit_test = is_unit_test(sys.argv[1:])
build(PARMS, src, unit_test)
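# A minimal sketch of how the argv handling above resolves what gets built
# (the OVPN3 path and source names are placeholders, not values from parms.py):
demo_parms = {"OVPN3": r"C:\src\ovpn3"}
print(src_fn_argv(demo_parms, []))                 # defaults to core\test\ovpncli\cli.cpp
print(src_fn_argv(demo_parms, ["myprog"]))         # a bare name gets ".cpp" appended
print(is_unit_test(["myprog", "unittest"]))        # True -> link against gtest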
| 37.882979
| 641
| 0.624544
|
import os
from utils import *
def cli_cpp(parms):
return os.path.join(parms['OVPN3'], "core", "test", "ovpncli", "cli.cpp")
def src_fn(parms, srcfile):
if srcfile:
if '.' not in os.path.basename(srcfile):
srcfile += ".cpp"
else:
srcfile = cli_cpp(parms)
return srcfile
def is_unit_test(argv):
unit_test = False
if len(argv) >= 2:
unit_test = argv[1] == "unittest"
return unit_test
def src_fn_argv(parms, argv):
srcfile = None
if len(argv) >= 1:
srcfile = argv[0]
return src_fn(parms, srcfile)
def build(parms, srcfile, unit_test=False):
if parms['DEBUG']:
dbg_rel_flags = "/Zi"
else:
dbg_rel_flags = "/O2"
options = {
"ovpn3" : parms['OVPN3'],
"tap" : os.path.join(parms['TAP'], 'src'),
"tap_component_id" : parms['TAP_WIN_COMPONENT_ID'],
"asio" : os.path.join(build_dir(parms), "asio"),
"mbedtls" : os.path.join(build_dir(parms), "mbedtls"),
"lz4" : os.path.join(build_dir(parms), "lz4", "lib"),
"srcfile" : srcfile,
"extra_defs" : parms['CPP_EXTRA'],
"extra_inc" : "",
"extra_lib_path" : "",
"extra_lib" : "",
}
vc_parms(parms, options)
arch = os.environ.get("ARCH", parms['ARCH'])
if arch == "x86_xp":
options['extra_defs'] += " /D_WIN32_WINNT=0x0501"
else:
options['extra_defs'] += " /D_WIN32_WINNT=0x0600"
options['extra_lib'] += " fwpuclnt.lib"
if 'jsoncpp' in parms['LIB_VERSIONS']:
options["jsoncpp"] = os.path.join(build_dir(parms), "jsoncpp")
options['extra_inc'] += " /DHAVE_JSONCPP /I %(jsoncpp)s/dist" % options
options['extra_lib_path'] += " /LIBPATH:%(jsoncpp)s/dist" % options
options['extra_lib'] += " jsoncpp.lib"
if unit_test:
options['extra_lib'] += " gtest.lib"
options['extra_inc'] += " /I %s" % os.path.join(parms["GTEST_ROOT"], "googletest", "include")
options['extra_lib_path'] += " /LIBPATH:%s" % os.path.join(parms["GTEST_ROOT"], "googlemock", "gtest", "Debug")
if parms.get("CONNECT"):
options['extra_inc'] += " /I " + os.path.join(parms['OVPN3'], "common")
vc_cmd(parms, r"cl %(extra_defs)s /DNOMINMAX /D_CRT_SECURE_NO_WARNINGS /DUSE_ASIO /DASIO_STANDALONE /DASIO_NO_DEPRECATED /I %(asio)s\asio\include /DUSE_MBEDTLS /I %(mbedtls)s\include /DHAVE_LZ4 /I %(lz4)s%(extra_inc)s -DTAP_WIN_COMPONENT_ID=%(tap_component_id)s /I %(tap)s /I %(ovpn3)s\core /EHsc %(link_static_dynamic_flags)s /W0 %(dbg_rel_flags)s /nologo %(srcfile)s /link /LIBPATH:%(mbedtls)s\library /LIBPATH:%(lz4)s%(extra_lib_path)s mbedtls.lib lz4.lib%(extra_lib)s ws2_32.lib crypt32.lib iphlpapi.lib winmm.lib user32.lib gdi32.lib advapi32.lib wininet.lib shell32.lib ole32.lib rpcrt4.lib" % options, arch=os.environ.get("ARCH"))
if __name__ == "__main__":
import sys
from parms import PARMS
PARMS['BUILD'] = os.environ.get('BUILD', PARMS['BUILD'])
PARMS['OVPN3'] = os.environ.get('OVPN3', PARMS['OVPN3'])
src = src_fn_argv(PARMS, sys.argv[1:])
unit_test = is_unit_test(sys.argv[1:])
build(PARMS, src, unit_test)
| true
| true
|
7906ecc8a15fb85fc44372e951a1e8533503a994
| 353
|
py
|
Python
|
algorithms_and_data_structures/algorithms/string_processing/is_palindrome/test_is_palindrome.py
|
IngCarlosPedroza/algorithms-and-data-structures-py
|
435aea7a703067c008001cd04e7f101dd6aff190
|
[
"MIT"
] | 2
|
2022-01-14T01:33:24.000Z
|
2022-01-14T03:23:41.000Z
|
algorithms_and_data_structures/algorithms/string_processing/is_palindrome/test_is_palindrome.py
|
IngCarlosPedroza/algorithms-and-data-structures-py
|
435aea7a703067c008001cd04e7f101dd6aff190
|
[
"MIT"
] | 1
|
2022-01-14T03:26:58.000Z
|
2022-01-14T03:26:58.000Z
|
algorithms_and_data_structures/algorithms/string_processing/is_palindrome/test_is_palindrome.py
|
IngCarlosPedroza/algorithms-and-data-structures-py
|
435aea7a703067c008001cd04e7f101dd6aff190
|
[
"MIT"
] | 1
|
2022-01-14T03:23:45.000Z
|
2022-01-14T03:23:45.000Z
|
from . import is_palindrome
test_subjects = [
is_palindrome
]
complex_pali = '''Anita. .laVa,
:; la?
TINa!'''
def test_is_palindrome():
for subject in test_subjects:
assert subject.algorithm('')
assert subject.algorithm(' ')
assert subject.algorithm(complex_pali)
assert not subject.algorithm('Nope')
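# A minimal sketch of an implementation that would satisfy the tests above
# (not necessarily the package's actual algorithm): ignore case and every
# non-alphanumeric character, then compare against the reverse.
def algorithm(text: str) -> bool:
    cleaned = [c.lower() for c in text if c.isalnum()]
    return cleaned == cleaned[::-1]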
| 19.611111
| 46
| 0.648725
|
from . import is_palindrome
test_subjects = [
is_palindrome
]
complex_pali = '''Anita. .laVa,
:; la?
TINa!'''
def test_is_palindrome():
for subject in test_subjects:
assert subject.algorithm('')
assert subject.algorithm(' ')
assert subject.algorithm(complex_pali)
assert not subject.algorithm('Nope')
| true
| true
|
7906ee715bc351c3d4cefb014bed730332642ace
| 56,888
|
py
|
Python
|
Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py
|
shubhamyadav-coditas/content
|
9f53434e67eaaf45b5f13a132ce86246842185a9
|
[
"MIT"
] | 1
|
2021-08-07T00:21:58.000Z
|
2021-08-07T00:21:58.000Z
|
Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py
|
shubhamyadav-coditas/content
|
9f53434e67eaaf45b5f13a132ce86246842185a9
|
[
"MIT"
] | 1
|
2022-01-19T13:41:51.000Z
|
2022-01-19T15:00:05.000Z
|
Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py
|
shubhamyadav-coditas/content
|
9f53434e67eaaf45b5f13a132ce86246842185a9
|
[
"MIT"
] | 1
|
2021-01-05T12:20:30.000Z
|
2021-01-05T12:20:30.000Z
|
"""HelloWorld Integration for Cortex XSOAR (aka Demisto)
This integration is a good example on you can build a Cortex XSOAR Integration
using Python 3. Please follow the documentation links below and make sure that
your integration follows the Code Conventions and passes the Linting phase.
Developer Documentation: https://xsoar.pan.dev/docs/welcome
Code Conventions: https://xsoar.pan.dev/docs/integrations/code-conventions
Linting: https://xsoar.pan.dev/docs/integrations/linting
When building a Cortex XSOAR integration that is reusable, a lot of effort
must be placed in the design. We recommend filling in a Design Document template,
which allows you to capture Use Cases, Requirements and Inputs/Outputs.
Example Design document for this Integration (HelloWorld):
https://docs.google.com/document/d/1wETtBEKg37PHNU8tYeB56M1LE314ux086z3HFeF_cX0
HelloWorld API
--------------
The HelloWorld API is a simple API that shows a realistic use case for an XSOAR
integration. It's actually a real API that is available to the following URL:
https://soar.mastersofhack.com - if you need an API Key to test it out please
reach out to your Cortex XSOAR contacts.
This API has a few basic functions:
- Alerts: the endpoint returns mocked alerts and allows you to search based on
a number of parameters, such as state (ACTIVE or CLOSED), type, timestamp. It
can also return a single alert by ID. This is used to create new Incidents in
XSOAR by using the ``fetch-incidents`` command, which is by default invoked
every minute.
There is also an endpoint that allows you to retrieve additional details about a
specific alert by ID, and one to change the alert status to "CLOSED" once
it has been resolved.
- Reputation (ip and domain): these endpoints return, for an IP and
domain respectively, a WHOIS lookup of the entity as well as a reputation score
(from 0 to 100) that is used to determine whether the entity is malicious. This
endpoint is called by XSOAR reputation commands ``ip`` and ``domain`` that
are run automatically every time an indicator is extracted in XSOAR. As a best
practice of design, it is important to map and document the mapping between
a score in the original API format (0 to 100 in this case) to a score in XSOAR
format (0 to 3). This score is called ``DBotScore``, and is returned in the
context to allow automated handling of indicators based on their reputation.
More information: https://xsoar.pan.dev/docs/integrations/dbot
- Scan: to demonstrate how to run commands that are not returning instant data,
the API provides a scan endpoint that simulates scanning a host and generating
a report after the scan is completed. The API has endpoints to start a scan,
which returns a job ID, poll for the scan status and, if the scan is completed,
retrieve the job results.
This function is used in conjunction with the HelloWorld Scan playbook that uses
the GenericPolling mechanism to implement the job polling loop. The results
can be returned in JSON or attachment file format.
Info on GenericPolling: https://xsoar.pan.dev/docs/playbooks/generic-polling
Please check the HelloWorld Design Document referenced above for details about
the raw API responses as well as the design details for this integration.
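As noted above for the reputation endpoints, the vendor score (0 to 100) has to be
mapped to the XSOAR DBotScore range (0 to 3). For illustration only, a minimal
sketch of such a mapping (not part of this integration, assuming CommonServerPython
is already imported and a configurable threshold):
    def to_dbot_score(vendor_score: int, threshold: int = 65) -> int:
        # 0 means unknown; at or above the threshold is bad,
        # at or above half the threshold is suspicious, anything else is good
        if vendor_score == 0:
            return Common.DBotScore.NONE
        if vendor_score >= threshold:
            return Common.DBotScore.BAD
        if vendor_score >= threshold / 2:
            return Common.DBotScore.SUSPICIOUS
        return Common.DBotScore.GOOD
The ``ip`` and ``domain`` command functions below implement this same mapping inline.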
This integration also has a ``say-hello`` command for backward compatibility,
that doesn't connect to an API and just returns a ``Hello {name}`` string,
where name is the input value provided.
Integration File Structure
--------------------------
An integration usually consists of the following parts:
- Imports
- Constants
- Client Class
- Helper Functions
- Command Functions
- Main Function
- Entry Point
Imports
-------
Here you can import the Python modules you need for your integration. If you need
a module that is not part of the default XSOAR Docker images, you can add
a custom one. More details: https://xsoar.pan.dev/docs/integrations/docker
There are also internal imports that are used by XSOAR:
- demistomock (imported as demisto): allows your code to work offline for
testing. The actual ``demisto`` module is provided at runtime when the
code runs in XSOAR.
- CommonServerPython.py: contains a set of helper functions, base classes
and other useful components that will make your integration code easier
to maintain.
- CommonServerUserPython.py: includes a set of user defined commands that
are specific to an XSOAR installation. Do not use it for integrations that
are meant to be shared externally.
These imports are automatically loaded at runtime within the XSOAR script
runner, so you shouldn't modify them
Constants
---------
Usually some constants that do not require user parameters or inputs, such
as the default API entry point for your service, or the maximum numbers of
incidents to fetch every time.
Client Class
------------
We recommend using a Client class to wrap all the code that needs to interact
with your API. Moreover, we recommend, when possible, inheriting from the
BaseClient class, defined in CommonServerPython.py. This class already handles
a lot of the work, such as system proxy settings, SSL certificate verification
and exception handling for HTTP errors.
Note that the Client class should NOT contain any Cortex XSOAR specific code,
i.e. it shouldn't use anything in the ``demisto`` class (functions such as
``demisto.args()`` or ``demisto.results()`` or even ``return_results`` and
``return_error``).
You will use the Command Functions to handle XSOAR inputs and outputs.
When calling an API, you should use the ``_http_request()`` method and you
can return the raw data to the calling function (usually a Command function).
You should usually have one function for each API endpoint.
Look at the code and the comments of this specific class to better understand
the implementation details.
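As a rough sketch of the pattern (the ``widget`` names are hypothetical and not part
of this API), a typical Client method only wraps ``_http_request()`` and returns the
raw response:
    class MyClient(BaseClient):
        def get_widget(self, widget_id: str) -> Dict[str, Any]:
            # plain API wrapper: no demisto.args()/demisto.results() calls here
            return self._http_request(
                method='GET',
                url_suffix='/widget',
                params={'widget_id': widget_id}
            )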
Helper Functions
----------------
Helper functions are utility functions used by several command functions
throughout your code. For example, they map arguments to types
or convert severity formats from integration-specific to XSOAR.
Many helper functions are already defined in ``CommonServerPython.py`` and are
often very handy.
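A hypothetical example of such a helper (a sketch only, not used by this
integration) that converts an epoch timestamp returned by an API into the ISO8601
string format XSOAR expects:
    def epoch_to_iso(epoch_seconds: int) -> str:
        # timestamp_to_datestring() is defined in CommonServerPython.py and expects milliseconds
        return timestamp_to_datestring(epoch_seconds * 1000)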
Command Functions
-----------------
Command functions perform the mapping between XSOAR inputs and outputs to the
Client class functions inputs and outputs. As a best practice, they shouldn't
contain calls to ``demisto.args()``, ``demisto.results()``, ``return_error``
and ``demisto.command()`` as those should be handled through the ``main()``
function.
However, in command functions, use ``demisto`` or ``CommonServerPython.py``
artifacts, such as ``demisto.debug()`` or the ``CommandResults`` class and the
``Common.*`` classes.
Usually you will have one command function for every specific XSOAR command
you want to implement in your integration, plus ``test-module``,
``fetch-incidents`` and ``fetch-indicators`` (if the latter two are supported
by your integration). Each command function should invoke one specific function
of the Client class.
Command functions, when invoked through an XSOAR command usually return data
using the ``CommandResults`` class, that is then passed to ``return_results()``
in the ``main()`` function.
``return_results()`` is defined in ``CommonServerPython.py`` to return
the data to XSOAR. ``return_results()`` actually wraps ``demisto.results()``.
You should never use ``demisto.results()`` directly.
Sometimes you will need to return values in a format that is not compatible
with ``CommandResults`` (for example files): in that case you must return a
data structure that is then passed to ``return_results()`` (i.e.
check the ``scan_results_command`` function in this file that has the option
to return a file to Cortex XSOAR).
In any case you should never call ``return_results()`` directly from the
command functions.
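Putting this together, a command function typically follows the sketch below (the
``widget`` command and Client method are hypothetical and only illustrate the
pattern):
    def get_widget_command(client: MyClient, args: Dict[str, Any]) -> CommandResults:
        widget_id = args.get('widget_id')
        if not widget_id:
            raise ValueError('widget_id not specified')
        result = client.get_widget(widget_id)
        return CommandResults(
            readable_output=tableToMarkdown('Widget', result),
            outputs_prefix='HelloWorld.Widget',
            outputs_key_field='widget_id',
            outputs=result
        )
The caller (``main()``) is then responsible for passing the returned object to
``return_results()``.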
When you create the CommandResults object in command functions, you
usually pass some types of data:
- Human Readable: usually in Markdown format. This is what is presented to the
analyst in the War Room. You can use ``tableToMarkdown()``, defined in
``CommonServerPython.py``, to convert lists and dicts in Markdown and pass it
to ``return_results()`` using the ``readable_output`` argument, or the
``return_results()`` function will call ``tableToMarkdown()`` automatically for
you.
- Context Output: this is the machine readable data, JSON based, that XSOAR can
parse and manage in the Playbooks or Incident's War Room. The Context Output
fields should be defined in your integration YML file and is important during
the design phase. Make sure you define the format and follow best practices.
You can use ``demisto-sdk json-to-outputs`` to autogenerate the YML file
outputs section. Context output is passed via the ``outputs`` argument of ``CommandResults``,
and the prefix (i.e. ``HelloWorld.Alert``) is passed via the ``outputs_prefix``
argument.
More information on Context Outputs, Standards, DBotScore and demisto-sdk:
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/context-standards
https://xsoar.pan.dev/docs/integrations/dbot
https://github.com/demisto/demisto-sdk/blob/master/demisto_sdk/commands/json_to_outputs/README.md
Also, when you write data in the Context, you want to make sure that if you
return updated information for an entity, you update it rather than appending to
the list of entities (i.e. in HelloWorld you want to update the status of an
existing ``HelloWorld.Alert`` in the context when you retrieve it, rather than
adding a new one if you already retrieved it). To update data in the Context,
you can define which is the key attribute to use, such as (using the example):
``outputs_key_field='alert_id'``. This means that you are using the ``alert_id``
key to determine whether to add a new entry in the context or update an
existing one that has the same ID. You can look at the examples to understand
how it works.
More information here:
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/dt
- Raw Output: this is usually the raw result from your API and is used for
troubleshooting purposes or for invoking your command from Automation Scripts.
If not specified, ``return_results()`` will use the same data as ``outputs``.
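For example, if you trim the API response before writing it to the context, you can
still keep the full payload for troubleshooting by passing it separately (a sketch,
assuming a ``CommandResults`` version that accepts the ``raw_response`` argument):
    trimmed = {k: v for k, v in api_response.items() if k not in ('objects', 'nir')}
    results = CommandResults(
        outputs_prefix='HelloWorld.IP',
        outputs_key_field='ip',
        outputs=trimmed,
        raw_response=api_response
    )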
Main Function
-------------
The ``main()`` function takes care of reading the integration parameters via
the ``demisto.params()`` function, initializes the Client class and checks the
different options provided to ``demisto.commands()``, to invoke the correct
command function passing to it ``demisto.args()`` and returning the data to
``return_results()``. If implemented, ``main()`` also invokes the function
``fetch_incidents()``with the right parameters and passes the outputs to the
``demisto.incidents()`` function. ``main()`` also catches exceptions and
returns an error message via ``return_error()``.
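In its simplest form the dispatch looks like the sketch below (the command name and
``get_widget_command`` are hypothetical; the real ``main()`` further down handles
many more commands and parameters):
    def main() -> None:
        params = demisto.params()
        client = MyClient(
            base_url=params['url'],
            verify=not params.get('insecure', False),
            proxy=params.get('proxy', False)
        )
        try:
            if demisto.command() == 'test-module':
                return_results('ok')  # real integrations call a dedicated test function
            elif demisto.command() == 'helloworld-get-widget':
                return_results(get_widget_command(client, demisto.args()))
        except Exception as e:
            return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')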
Entry Point
-----------
This is the integration code entry point. It checks whether the ``__name__``
variable is ``__main__`` , ``__builtin__`` (for Python 2) or ``builtins`` (for
Python 3) and then calls the ``main()`` function. Just keep this convention.
"""
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
import urllib3
import dateparser
import traceback
from typing import Any, Dict, Tuple, List, Optional, Union, cast
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
MAX_INCIDENTS_TO_FETCH = 50
HELLOWORLD_SEVERITIES = ['Low', 'Medium', 'High', 'Critical']
''' CLIENT CLASS '''
class Client(BaseClient):
"""Client class to interact with the service API
This Client implements API calls, and does not contain any Demisto logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
For this HelloWorld implementation, no special attributes defined
"""
def get_ip_reputation(self, ip: str) -> Dict[str, Any]:
"""Gets the IP reputation using the '/ip' API endpoint
:type ip: ``str``
:param ip: IP address to get the reputation for
:return: dict containing the IP reputation as returned from the API
:rtype: ``Dict[str, Any]``
"""
return self._http_request(
method='GET',
url_suffix='/ip',
params={
'ip': ip
}
)
def get_domain_reputation(self, domain: str) -> Dict[str, Any]:
"""Gets the Domain reputation using the '/domain' API endpoint
:type domain: ``str``
:param domain: domain name to get the reputation for
:return: dict containing the domain reputation as returned from the API
:rtype: ``Dict[str, Any]``
"""
return self._http_request(
method='GET',
url_suffix='/domain',
params={
'domain': domain
}
)
def search_alerts(self, alert_status: Optional[str], severity: Optional[str],
alert_type: Optional[str], max_results: Optional[int],
start_time: Optional[int]) -> List[Dict[str, Any]]:
"""Searches for HelloWorld alerts using the '/get_alerts' API endpoint
All the parameters are passed directly to the API as HTTP POST parameters in the request
:type alert_status: ``Optional[str]``
:param alert_status: status of the alert to search for. Options are: 'ACTIVE' or 'CLOSED'
:type severity: ``Optional[str]``
:param severity:
severity of the alert to search for. Comma-separated values.
Options are: "Low", "Medium", "High", "Critical"
:type alert_type: ``Optional[str]``
:param alert_type: type of alerts to search for. There is no list of predefined types
:type max_results: ``Optional[int]``
:param max_results: maximum number of results to return
:type start_time: ``Optional[int]``
:param start_time: start timestamp (epoch in seconds) for the alert search
:return: list containing the found HelloWorld alerts as dicts
:rtype: ``List[Dict[str, Any]]``
"""
request_params: Dict[str, Any] = {}
if alert_status:
request_params['alert_status'] = alert_status
if alert_type:
request_params['alert_type'] = alert_type
if severity:
request_params['severity'] = severity
if max_results:
request_params['max_results'] = max_results
if start_time:
request_params['start_time'] = start_time
return self._http_request(
method='GET',
url_suffix='/get_alerts',
params=request_params
)
def get_alert(self, alert_id: str) -> Dict[str, Any]:
"""Gets a specific HelloWorld alert by id
:type alert_id: ``str``
:param alert_id: id of the alert to return
:return: dict containing the alert as returned from the API
:rtype: ``Dict[str, Any]``
"""
return self._http_request(
method='GET',
url_suffix='/get_alert_details',
params={
'alert_id': alert_id
}
)
def update_alert_status(self, alert_id: str, alert_status: str) -> Dict[str, Any]:
"""Changes the status of a specific HelloWorld alert
:type alert_id: ``str``
:param alert_id: id of the alert to return
:type alert_status: ``str``
:param alert_status: new alert status. Options are: 'ACTIVE' or 'CLOSED'
:return: dict containing the alert as returned from the API
:rtype: ``Dict[str, Any]``
"""
return self._http_request(
method='GET',
url_suffix='/change_alert_status',
params={
'alert_id': alert_id,
'alert_status': alert_status
}
)
def scan_start(self, hostname: str) -> Dict[str, Any]:
"""Starts a HelloWorld scan on a specific hostname
:type hostname: ``str``
:param hostname: hostname of the machine to scan
:return: dict containing the scan status as returned from the API
:rtype: ``Dict[str, Any]``
"""
return self._http_request(
method='GET',
url_suffix='/start_scan',
params={
'hostname': hostname
}
)
def scan_status(self, scan_id: str) -> Dict[str, Any]:
"""Gets the status of a HelloWorld scan
:type scan_id: ``str``
:param scan_id: ID of the scan to retrieve status for
:return: dict containing the scan status as returned from the API
:rtype: ``Dict[str, Any]``
"""
return self._http_request(
method='GET',
url_suffix='/check_scan',
params={
'scan_id': scan_id
}
)
def scan_results(self, scan_id: str) -> Dict[str, Any]:
"""Gets the results of a HelloWorld scan
:type scan_id: ``str``
:param scan_id: ID of the scan to retrieve results for
:return: dict containing the scan results as returned from the API
:rtype: ``Dict[str, Any]``
"""
return self._http_request(
method='GET',
url_suffix='/get_scan_results',
params={
'scan_id': scan_id
}
)
def say_hello(self, name: str) -> str:
"""Returns 'Hello {name}'
:type name: ``str``
:param name: name to append to the 'Hello' string
:return: string containing 'Hello {name}'
:rtype: ``str``
"""
return f'Hello {name}'
''' HELPER FUNCTIONS '''
def parse_domain_date(domain_date: Union[List[str], str], date_format: str = '%Y-%m-%dT%H:%M:%S.000Z') -> Optional[str]:
"""Converts whois date format to an ISO8601 string
Converts the HelloWorld domain WHOIS date (YYYY-mm-dd HH:MM:SS) format
in a datetime. If a list is returned with multiple elements, takes only
the first one.
:type domain_date: ``Union[List[str],str]``
    :param domain_date:
        a string or list of strings with the format 'YYYY-mm-dd HH:MM:SS'
    :type date_format: ``str``
    :param date_format: the strftime format used for the returned date string
:return: Parsed time in ISO8601 format
:rtype: ``Optional[str]``
"""
if isinstance(domain_date, str):
# if str parse the value
domain_date_dt = dateparser.parse(domain_date)
if domain_date_dt:
return domain_date_dt.strftime(date_format)
elif isinstance(domain_date, list) and len(domain_date) > 0 and isinstance(domain_date[0], str):
# if list with at least one element, parse the first element
domain_date_dt = dateparser.parse(domain_date[0])
if domain_date_dt:
return domain_date_dt.strftime(date_format)
# in any other case return nothing
return None
def convert_to_demisto_severity(severity: str) -> int:
"""Maps HelloWorld severity to Cortex XSOAR severity
Converts the HelloWorld alert severity level ('Low', 'Medium',
'High', 'Critical') to Cortex XSOAR incident severity (1 to 4)
for mapping.
:type severity: ``str``
:param severity: severity as returned from the HelloWorld API (str)
:return: Cortex XSOAR Severity (1 to 4)
:rtype: ``int``
"""
# In this case the mapping is straightforward, but more complex mappings
# might be required in your integration, so a dedicated function is
# recommended. This mapping should also be documented.
return {
'Low': IncidentSeverity.LOW,
'Medium': IncidentSeverity.MEDIUM,
'High': IncidentSeverity.HIGH,
'Critical': IncidentSeverity.CRITICAL
}[severity]
''' COMMAND FUNCTIONS '''
def test_module(client: Client, first_fetch_time: int) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: HelloWorld client to use
    :type first_fetch_time: ``int``
    :param first_fetch_time: first fetch timestamp, used as the start time for the test alert search
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
# INTEGRATION DEVELOPER TIP
# Client class should raise the exceptions, but if the test fails
# the exception text is printed to the Cortex XSOAR UI.
# If you have some specific errors you want to capture (i.e. auth failure)
# you should catch the exception here and return a string with a more
# readable output (for example return 'Authentication Error, API Key
# invalid').
    # Cortex XSOAR will print anything you return other than 'ok' as
# an error
try:
client.search_alerts(max_results=1, start_time=first_fetch_time, alert_status=None, alert_type=None,
severity=None)
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok'
def say_hello_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""helloworld-say-hello command: Returns Hello {somename}
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``str``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['name']`` is used as input name
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains the hello world message
:rtype: ``CommandResults``
"""
# INTEGRATION DEVELOPER TIP
# In this case 'name' is an argument set in the HelloWorld.yml file as mandatory,
    # so the null check here is not strictly required, as XSOAR will always check it before your code is called.
# Although it's not mandatory to check, you are welcome to do so.
name = args.get('name', None)
if not name:
raise ValueError('name not specified')
# Call the Client function and get the raw response
result = client.say_hello(name)
# Create the human readable output.
# It will be in markdown format - https://www.markdownguide.org/basic-syntax/
    # More complex output can be formatted using ``tableToMarkdown()`` defined
# in ``CommonServerPython.py``
readable_output = f'## {result}'
# More information about Context:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# We return a ``CommandResults`` object, and we want to pass a custom
# markdown here, so the argument ``readable_output`` is explicit. If not
    # passed, ``CommandResults`` will do a ``tableToMarkdown()`` on the data
# to generate the readable output.
return CommandResults(
readable_output=readable_output,
outputs_prefix='hello',
outputs_key_field='',
outputs=result
)
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int],
first_fetch_time: Optional[int], alert_status: Optional[str],
min_severity: str, alert_type: Optional[str]
) -> Tuple[Dict[str, int], List[dict]]:
"""This function retrieves new alerts every interval (default is 1 minute).
This function has to implement the logic of making sure that incidents are
    fetched only once and no incidents are missed. By default it's invoked by
XSOAR every minute. It will use last_run to save the timestamp of the last
incident it processed. If last_run is not provided, it should use the
integration parameter first_fetch_time to determine when to start fetching
the first time.
:type client: ``Client``
:param Client: HelloWorld client to use
:type max_results: ``int``
:param max_results: Maximum numbers of incidents per fetch
:type last_run: ``Optional[Dict[str, int]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:type first_fetch_time: ``Optional[int]``
:param first_fetch_time:
If last_run is None (first time we are fetching), it contains
the timestamp in milliseconds on when to start fetching incidents
:type alert_status: ``Optional[str]``
:param alert_status:
status of the alert to search for. Options are: 'ACTIVE'
or 'CLOSED'
:type min_severity: ``str``
:param min_severity:
minimum severity of the alert to search for.
Options are: "Low", "Medium", "High", "Critical"
:type alert_type: ``Optional[str]``
:param alert_type:
type of alerts to search for. There is no list of predefined types
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, int], List[dict]]``
"""
# Get the last fetch time, if exists
# last_run is a dict with a single key, called last_fetch
last_fetch = last_run.get('last_fetch', None)
# Handle first fetch time
if last_fetch is None:
        # if missing, use what was provided via first_fetch_time
last_fetch = first_fetch_time
else:
# otherwise use the stored last fetch
last_fetch = int(last_fetch)
# for type checking, making sure that latest_created_time is int
latest_created_time = cast(int, last_fetch)
# Initialize an empty list of incidents to return
# Each incident is a dict with a string as a key
incidents: List[Dict[str, Any]] = []
# Get the CSV list of severities from min_severity
severity = ','.join(HELLOWORLD_SEVERITIES[HELLOWORLD_SEVERITIES.index(min_severity):])
alerts = client.search_alerts(
alert_type=alert_type,
alert_status=alert_status,
max_results=max_results,
start_time=last_fetch,
severity=severity
)
for alert in alerts:
        # If no created_time is set, default to epoch (0). We use time in ms so we must
# convert it from the HelloWorld API response
incident_created_time = int(alert.get('created', '0'))
incident_created_time_ms = incident_created_time * 1000
# to prevent duplicates, we are only adding incidents with creation_time > last fetched incident
if last_fetch:
if incident_created_time <= last_fetch:
continue
# If no name is present it will throw an exception
incident_name = alert['name']
# INTEGRATION DEVELOPER TIP
# The incident dict is initialized with a few mandatory fields:
# name: the incident name
# occurred: the time on when the incident occurred, in ISO8601 format
# we use timestamp_to_datestring() from CommonServerPython.py to
# handle the conversion.
# rawJSON: everything else is packed in a string via json.dumps()
# and is included in rawJSON. It will be used later for classification
# and mapping inside XSOAR.
# severity: it's not mandatory, but is recommended. It must be
# converted to XSOAR specific severity (int 1 to 4)
# Note that there are other fields commented out here. You can do some
# mapping of fields (either out of the box fields, like "details" and
# "type") or custom fields (like "helloworldid") directly here in the
# code, or they can be handled in the classification and mapping phase.
# In either case customers can override them. We leave the values
# commented out here, but you can use them if you want.
incident = {
'name': incident_name,
# 'details': alert['name'],
'occurred': timestamp_to_datestring(incident_created_time_ms),
'rawJSON': json.dumps(alert),
# 'type': 'Hello World Alert', # Map to a specific XSOAR incident Type
'severity': convert_to_demisto_severity(alert.get('severity', 'Low')),
# 'CustomFields': { # Map specific XSOAR Custom Fields
# 'helloworldid': alert.get('alert_id'),
# 'helloworldstatus': alert.get('alert_status'),
# 'helloworldtype': alert.get('alert_type')
# }
}
incidents.append(incident)
# Update last run and add incident if the incident is newer than last fetch
if incident_created_time > latest_created_time:
latest_created_time = incident_created_time
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': latest_created_time}
return next_run, incidents
def ip_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
"""ip command: Returns IP reputation for a list of IPs
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['ip']`` is a list of IPs or a single IP
``args['threshold']`` threshold to determine whether an IP is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether an IP is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains IPs
:rtype: ``CommandResults``
"""
# INTEGRATION DEVELOPER TIP
# Reputation commands usually support multiple inputs (i.e. arrays), so
# they can be invoked once in XSOAR. In this case the API supports a single
# IP at a time, so we will cycle this for all the members of the array.
# We use argToList(), implemented in CommonServerPython.py to automatically
# return a list of a single element even if the provided input is a scalar.
ips = argToList(args.get('ip'))
if len(ips) == 0:
raise ValueError('IP(s) not specified')
# It's a good practice to document the threshold you use to determine
# if a score is malicious in your integration documentation.
# Thresholds should also be possible to override, as in this case,
# where threshold is an actual argument of the command.
threshold = int(args.get('threshold', default_threshold))
# Initialize an empty list of CommandResults to return
# each CommandResult will contain context standard for IP
command_results: List[CommandResults] = []
for ip in ips:
ip_data = client.get_ip_reputation(ip)
ip_data['ip'] = ip
# HelloWorld score to XSOAR reputation mapping
# See: https://xsoar.pan.dev/docs/integrations/dbot
# We are using Common.DBotScore as macros to simplify
# the mapping.
score = 0
reputation = int(ip_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE # unknown
elif reputation >= threshold:
score = Common.DBotScore.BAD # bad
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS # suspicious
else:
score = Common.DBotScore.GOOD # good
# The context is bigger here than other commands, as it consists in 3
# parts: the vendor-specific context (HelloWorld), the standard-context
# (IP) and the DBotScore.
# More information:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# https://xsoar.pan.dev/docs/integrations/context-standards
# https://xsoar.pan.dev/docs/integrations/dbot
# Also check the HelloWorld Design Document
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name='HelloWorld',
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
# Create the IP Standard Context structure using Common.IP and add
# dbot_score to it.
ip_standard_context = Common.IP(
ip=ip,
asn=ip_data.get('asn'),
dbot_score=dbot_score
)
# INTEGRATION DEVELOPER TIP
# In the integration specific Context output (HelloWorld.IP) in this
# example you want to provide a lot of information as it can be used
# programmatically from within Cortex XSOAR in playbooks and commands.
        # On the other hand, this API is way too verbose, so we want to select
# only certain keys to be returned in order not to clog the context
# with useless information. What to actually return in the context and
# to define as a command output is subject to design considerations.
# INTEGRATION DEVELOPER TIP
# To generate the Context Outputs on the YML use ``demisto-sdk``'s
# ``json-to-outputs`` option.
# Define which fields we want to exclude from the context output as
# they are too verbose.
ip_context_excluded_fields = ['objects', 'nir']
ip_data = {k: ip_data[k] for k in ip_data if k not in ip_context_excluded_fields}
        # In this case we want to use a custom markdown to specify the table title,
# but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
# automatically
readable_output = tableToMarkdown('IP', ip_data)
# INTEGRATION DEVELOPER TIP
# The output key will be ``HelloWorld.IP``, using ``ip`` as the key field.
# ``indicator`` is used to provide the context standard (IP)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.IP',
outputs_key_field='ip',
outputs=ip_data,
indicator=ip_standard_context
))
return command_results
def domain_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
"""domain command: Returns domain reputation for a list of domains
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['domain']`` list of domains or a single domain
``args['threshold']`` threshold to determine whether a domain is malicious
:type default_threshold: ``int``
:param default_threshold:
        default threshold to determine whether a domain is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults``
"""
# INTEGRATION DEVELOPER TIP
# Reputation commands usually support multiple inputs (i.e. arrays), so
# they can be invoked once in XSOAR. In this case the API supports a single
    # domain at a time, so we will cycle this for all the members of the array.
# We use argToList(), implemented in CommonServerPython.py to automatically
# return a list of a single element even if the provided input is a scalar.
domains = argToList(args.get('domain'))
if len(domains) == 0:
raise ValueError('domain(s) not specified')
threshold = int(args.get('threshold', default_threshold))
# Initialize an empty list of CommandResults to return,
# each CommandResult will contain context standard for Domain
command_results: List[CommandResults] = []
for domain in domains:
domain_data = client.get_domain_reputation(domain)
domain_data['domain'] = domain
# INTEGRATION DEVELOPER TIP
# We want to convert the dates to ISO8601 as
# Cortex XSOAR customers and integrations use this format by default
if 'creation_date' in domain_data:
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if 'expiration_date' in domain_data:
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if 'updated_date' in domain_data:
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
# HelloWorld score to XSOAR reputation mapping
# See: https://xsoar.pan.dev/docs/integrations/dbot
# We are using Common.DBotScore as macros to simplify
# the mapping.
score = 0
reputation = int(domain_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE # unknown
elif reputation >= threshold:
score = Common.DBotScore.BAD # bad
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS # suspicious
else:
score = Common.DBotScore.GOOD # good
# INTEGRATION DEVELOPER TIP
# The context is bigger here than other commands, as it consists in 3
# parts: the vendor-specific context (HelloWorld), the standard-context
# (Domain) and the DBotScore.
# More information:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# https://xsoar.pan.dev/docs/integrations/context-standards
# https://xsoar.pan.dev/docs/integrations/dbot
# Also check the sample Design Document
dbot_score = Common.DBotScore(
indicator=domain,
integration_name='HelloWorld',
indicator_type=DBotScoreType.DOMAIN,
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
# Create the Domain Standard Context structure using Common.Domain and
# add dbot_score to it.
domain_standard_context = Common.Domain(
domain=domain,
creation_date=domain_data.get('creation_date', None),
expiration_date=domain_data.get('expiration_date', None),
updated_date=domain_data.get('updated_date', None),
organization=domain_data.get('org', None),
name_servers=domain_data.get('name_servers', None),
registrant_name=domain_data.get('name', None),
registrant_country=domain_data.get('country', None),
registrar_name=domain_data.get('registrar', None),
dbot_score=dbot_score
)
        # In this case we want to use a custom markdown to specify the table title,
# but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
# automatically
readable_output = tableToMarkdown('Domain', domain_data)
# INTEGRATION DEVELOPER TIP
# The output key will be ``HelloWorld.Domain``, using ``domain`` as the key
# field.
# ``indicator`` is used to provide the context standard (Domain)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Domain',
outputs_key_field='domain',
outputs=domain_data,
indicator=domain_standard_context
))
return command_results
def search_alerts_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""helloworld-search-alerts command: Search alerts in HelloWorld
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['status']`` alert status. Options are 'ACTIVE' or 'CLOSED'
``args['severity']`` alert severity CSV
``args['alert_type']`` alert type
``args['start_time']`` start time as ISO8601 date or seconds since epoch
``args['max_results']`` maximum number of results to return
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains alerts
:rtype: ``CommandResults``
"""
status = args.get('status')
# Check if severity contains allowed values, use all if default
severities: List[str] = HELLOWORLD_SEVERITIES
severity = args.get('severity', None)
if severity:
severities = severity.split(',')
if not all(s in HELLOWORLD_SEVERITIES for s in severities):
raise ValueError(
f'severity must be a comma-separated value '
f'with the following options: {",".join(HELLOWORLD_SEVERITIES)}')
alert_type = args.get('alert_type')
# Convert the argument to a timestamp using helper function
start_time = arg_to_datetime(
arg=args.get('start_time'),
arg_name='start_time',
required=False
)
# Convert the argument to an int using helper function
max_results = arg_to_number(
arg=args.get('max_results'),
arg_name='max_results',
required=False
)
# Severity is passed to the API as a CSV
alerts = client.search_alerts(
severity=','.join(severities),
alert_status=status,
alert_type=alert_type,
start_time=int(start_time.timestamp()) if start_time else None,
max_results=max_results
)
# INTEGRATION DEVELOPER TIP
# We want to convert the "created" time from timestamp(s) to ISO8601 as
# Cortex XSOAR customers and integrations use this format by default
for alert in alerts:
if 'created' not in alert:
continue
created_time_ms = int(alert.get('created', '0')) * 1000
alert['created'] = timestamp_to_datestring(created_time_ms)
# in this example we are not providing a custom markdown, we will
# let ``CommandResults`` generate it by default.
return CommandResults(
outputs_prefix='HelloWorld.Alert',
outputs_key_field='alert_id',
outputs=alerts
)
def get_alert_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""helloworld-get-alert command: Returns a HelloWorld alert
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['alert_id']`` alert ID to return
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains an alert
:rtype: ``CommandResults``
"""
alert_id = args.get('alert_id', None)
if not alert_id:
raise ValueError('alert_id not specified')
alert = client.get_alert(alert_id=alert_id)
# INTEGRATION DEVELOPER TIP
# We want to convert the "created" time from timestamp(s) to ISO8601 as
# Cortex XSOAR customers and integrations use this format by default
if 'created' in alert:
created_time_ms = int(alert.get('created', '0')) * 1000
alert['created'] = timestamp_to_datestring(created_time_ms)
    # tableToMarkdown() is defined in CommonServerPython.py and is used very
# often to convert lists and dicts into a human readable format in markdown
readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Alert',
outputs_key_field='alert_id',
outputs=alert
)
def update_alert_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""helloworld-update-alert-status command: Changes the status of an alert
Changes the status of a HelloWorld alert and returns the updated alert info
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['alert_id']`` alert ID to update
``args['status']`` new status, either ACTIVE or CLOSED
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains an updated alert
:rtype: ``CommandResults``
"""
alert_id = args.get('alert_id', None)
if not alert_id:
raise ValueError('alert_id not specified')
status = args.get('status', None)
if status not in ('ACTIVE', 'CLOSED'):
raise ValueError('status must be either ACTIVE or CLOSED')
alert = client.update_alert_status(alert_id, status)
# INTEGRATION DEVELOPER TIP
# We want to convert the "updated" time from timestamp(s) to ISO8601 as
# Cortex XSOAR customers and integrations use this format by default
if 'updated' in alert:
updated_time_ms = int(alert.get('updated', '0')) * 1000
alert['updated'] = timestamp_to_datestring(updated_time_ms)
    # tableToMarkdown() is defined in CommonServerPython.py and is used very
# often to convert lists and dicts into a human readable format in markdown
readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Alert',
outputs_key_field='alert_id',
outputs=alert
)
def scan_start_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""helloworld-start-scan command: Starts a HelloWorld scan
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['hostname']`` hostname to run the scan on
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains a scan job
:rtype: ``CommandResults``
"""
hostname = args.get('hostname', None)
if not hostname:
raise ValueError('hostname not specified')
scan = client.scan_start(hostname=hostname)
# INTEGRATION DEVELOPER TIP
# The API doesn't return the hostname of the scan it was called against,
# which is the input. It could be useful to have that information in the
# XSOAR context, so we are adding it manually here, based on the command
# input argument.
scan['hostname'] = hostname
scan_id = scan.get('scan_id')
readable_output = f'Started scan {scan_id}'
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Scan',
outputs_key_field='scan_id',
outputs=scan
)
def scan_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""helloworld-scan-status command: Returns status for HelloWorld scans
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['scan_id']`` list of scan IDs or single scan ID
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains a scan status
:rtype: ``CommandResults``
"""
scan_id_list = argToList(args.get('scan_id', []))
if len(scan_id_list) == 0:
raise ValueError('scan_id(s) not specified')
scan_list: List[Dict[str, Any]] = []
for scan_id in scan_id_list:
scan = client.scan_status(scan_id=scan_id)
scan_list.append(scan)
readable_output = tableToMarkdown('Scan status', scan_list)
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Scan',
outputs_key_field='scan_id',
outputs=scan_list
)
def scan_results_command(client: Client, args: Dict[str, Any]) -> Union[Dict[str, Any], CommandResults, List[CommandResults]]:
"""helloworld-scan-results command: Returns results for a HelloWorld scan
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['scan_id']`` scan ID to retrieve results
``args['format']`` format of the results. Options are 'file' or 'json'
:return:
        A ``CommandResults`` object compatible with ``return_results()``,
        that contains a scan result when json format is selected, or
        a Dict of entries also compatible with ``return_results()`` that
contains the output file when file format is selected.
:rtype: ``Union[Dict[str, Any],CommandResults]``
"""
scan_id = args.get('scan_id', None)
if not scan_id:
raise ValueError('scan_id not specified')
scan_format = args.get('format', 'file')
# INTEGRATION DEVELOPER TIP
# This function supports returning data in multiple formats, either in a json
# format that is then mapped to a table, or as a file attachment.
# In this case, if the format is "file", the return value is different and
# uses a raw format and ``fileResult()`` directly instead of
# ``CommandResults``. In either case you should return data to main and
# call ``return_results()`` from there.
# Always use ``CommandResults`` when possible but, if you need to return
# anything special like a file, you can use this raw format.
results = client.scan_results(scan_id=scan_id)
if scan_format == 'file':
return (
fileResult(
filename=f'{scan_id}.json',
data=json.dumps(results, indent=4),
file_type=entryTypes['entryInfoFile']
)
)
elif scan_format == 'json':
# This scan returns CVE information. CVE is also part of the XSOAR
# context standard, so we must extract CVE IDs and return them also.
# See: https://xsoar.pan.dev/docs/integrations/context-standards#cve
cves: List[Common.CVE] = []
command_results: List[CommandResults] = []
entities = results.get('entities', [])
for e in entities:
if 'vulns' in e.keys() and isinstance(e['vulns'], list):
cves.extend([Common.CVE(id=c, cvss=None, published=None, modified=None, description=None) for c in e['vulns']])
# INTEGRATION DEVELOPER TIP
# We want to provide a unique result for every CVE indicator.
# Since every entity may contain several CVE indicators,
# we will split the entities result and CVE indicator results.
readable_output = tableToMarkdown(f'Scan {scan_id} results', entities)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Scan',
outputs_key_field='scan_id',
outputs=results
))
cves = list(set(cves)) # make the indicator list unique
for cve in cves:
command_results.append(CommandResults(
readable_output=f"CVE {cve}",
indicator=cve
))
return command_results
else:
raise ValueError('Incorrect format, must be "json" or "file"')
''' MAIN FUNCTION '''
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
api_key = demisto.params().get('apikey')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/v1')
# if your Client class inherits from BaseClient, SSL verification is
# handled out of the box by it, just pass ``verify_certificate`` to
# the Client constructor
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = arg_to_datetime(
arg=demisto.params().get('first_fetch', '3 days'),
arg_name='First fetch time',
required=True
)
first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
# Using assert as a type guard (since first_fetch_time is always an int when required=True)
assert isinstance(first_fetch_timestamp, int)
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = demisto.params().get('proxy', False)
# INTEGRATION DEVELOPER TIP
# You can use functions such as ``demisto.debug()``, ``demisto.info()``,
# etc. to print information in the XSOAR server log. You can set the log
# level on the server configuration
# See: https://xsoar.pan.dev/docs/integrations/code-conventions#logging
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, first_fetch_timestamp)
return_results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
alert_status = demisto.params().get('alert_status', None)
alert_type = demisto.params().get('alert_type', None)
min_severity = demisto.params().get('min_severity', None)
# Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
max_results = arg_to_number(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(), # getLastRun() gets the last run dict
first_fetch_time=first_fetch_timestamp,
alert_status=alert_status,
min_severity=min_severity,
alert_type=alert_type
)
# saves next_run for the time fetch-incidents is invoked
demisto.setLastRun(next_run)
# fetch-incidents calls ``demisto.incidents()`` to provide the list
            # of incidents to create
demisto.incidents(incidents)
elif demisto.command() == 'ip':
default_threshold_ip = int(demisto.params().get('threshold_ip', '65'))
return_results(ip_reputation_command(client, demisto.args(), default_threshold_ip))
elif demisto.command() == 'domain':
default_threshold_domain = int(demisto.params().get('threshold_domain', '65'))
return_results(domain_reputation_command(client, demisto.args(), default_threshold_domain))
elif demisto.command() == 'helloworld-say-hello':
return_results(say_hello_command(client, demisto.args()))
elif demisto.command() == 'helloworld-search-alerts':
return_results(search_alerts_command(client, demisto.args()))
elif demisto.command() == 'helloworld-get-alert':
return_results(get_alert_command(client, demisto.args()))
elif demisto.command() == 'helloworld-update-alert-status':
return_results(update_alert_status_command(client, demisto.args()))
elif demisto.command() == 'helloworld-scan-start':
return_results(scan_start_command(client, demisto.args()))
elif demisto.command() == 'helloworld-scan-status':
return_results(scan_status_command(client, demisto.args()))
elif demisto.command() == 'helloworld-scan-results':
return_results(scan_results_command(client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 38.937714
| 127
| 0.671319
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
import urllib3
import dateparser
import traceback
from typing import Any, Dict, Tuple, List, Optional, Union, cast
urllib3.disable_warnings()
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
MAX_INCIDENTS_TO_FETCH = 50
HELLOWORLD_SEVERITIES = ['Low', 'Medium', 'High', 'Critical']
class Client(BaseClient):
def get_ip_reputation(self, ip: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/ip',
params={
'ip': ip
}
)
def get_domain_reputation(self, domain: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/domain',
params={
'domain': domain
}
)
def search_alerts(self, alert_status: Optional[str], severity: Optional[str],
alert_type: Optional[str], max_results: Optional[int],
start_time: Optional[int]) -> List[Dict[str, Any]]:
request_params: Dict[str, Any] = {}
if alert_status:
request_params['alert_status'] = alert_status
if alert_type:
request_params['alert_type'] = alert_type
if severity:
request_params['severity'] = severity
if max_results:
request_params['max_results'] = max_results
if start_time:
request_params['start_time'] = start_time
return self._http_request(
method='GET',
url_suffix='/get_alerts',
params=request_params
)
def get_alert(self, alert_id: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/get_alert_details',
params={
'alert_id': alert_id
}
)
def update_alert_status(self, alert_id: str, alert_status: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/change_alert_status',
params={
'alert_id': alert_id,
'alert_status': alert_status
}
)
def scan_start(self, hostname: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/start_scan',
params={
'hostname': hostname
}
)
def scan_status(self, scan_id: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/check_scan',
params={
'scan_id': scan_id
}
)
def scan_results(self, scan_id: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/get_scan_results',
params={
'scan_id': scan_id
}
)
def say_hello(self, name: str) -> str:
return f'Hello {name}'
def parse_domain_date(domain_date: Union[List[str], str], date_format: str = '%Y-%m-%dT%H:%M:%S.000Z') -> Optional[str]:
if isinstance(domain_date, str):
domain_date_dt = dateparser.parse(domain_date)
if domain_date_dt:
return domain_date_dt.strftime(date_format)
elif isinstance(domain_date, list) and len(domain_date) > 0 and isinstance(domain_date[0], str):
domain_date_dt = dateparser.parse(domain_date[0])
if domain_date_dt:
return domain_date_dt.strftime(date_format)
return None
def convert_to_demisto_severity(severity: str) -> int:
return {
'Low': IncidentSeverity.LOW,
'Medium': IncidentSeverity.MEDIUM,
'High': IncidentSeverity.HIGH,
'Critical': IncidentSeverity.CRITICAL
}[severity]
def test_module(client: Client, first_fetch_time: int) -> str:
try:
client.search_alerts(max_results=1, start_time=first_fetch_time, alert_status=None, alert_type=None,
severity=None)
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok'
def say_hello_command(client: Client, args: Dict[str, Any]) -> CommandResults:
name = args.get('name', None)
if not name:
raise ValueError('name not specified')
# Call the Client function and get the raw response
result = client.say_hello(name)
# Create the human readable output.
# It will be in markdown format - https://www.markdownguide.org/basic-syntax/
# More complex output can be formatted using ``tableToMarkDown()`` defined
# in ``CommonServerPython.py``
    readable_output = f'## {result}'
    # More information about Context:
    # https://xsoar.pan.dev/docs/integrations/context-and-outputs
    # We return a ``CommandResults`` object, and we want to pass a custom
    # markdown here, so the argument ``readable_output`` is explicit. If not
    # passed, ``CommandResults`` will do a ``tableToMarkdown()`` on the data
# to generate the readable output.
return CommandResults(
readable_output=readable_output,
outputs_prefix='hello',
outputs_key_field='',
outputs=result
)
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int],
first_fetch_time: Optional[int], alert_status: Optional[str],
min_severity: str, alert_type: Optional[str]
) -> Tuple[Dict[str, int], List[dict]]:
# Get the last fetch time, if exists
# last_run is a dict with a single key, called last_fetch
last_fetch = last_run.get('last_fetch', None)
# Handle first fetch time
if last_fetch is None:
        # if missing, use what was provided via first_fetch_time
last_fetch = first_fetch_time
else:
# otherwise use the stored last fetch
last_fetch = int(last_fetch)
# for type checking, making sure that latest_created_time is int
latest_created_time = cast(int, last_fetch)
# Initialize an empty list of incidents to return
# Each incident is a dict with a string as a key
incidents: List[Dict[str, Any]] = []
# Get the CSV list of severities from min_severity
severity = ','.join(HELLOWORLD_SEVERITIES[HELLOWORLD_SEVERITIES.index(min_severity):])
alerts = client.search_alerts(
alert_type=alert_type,
alert_status=alert_status,
max_results=max_results,
start_time=last_fetch,
severity=severity
)
for alert in alerts:
        # If no created_time is set, default to epoch (0). We use time in ms so we must
# convert it from the HelloWorld API response
incident_created_time = int(alert.get('created', '0'))
incident_created_time_ms = incident_created_time * 1000
# to prevent duplicates, we are only adding incidents with creation_time > last fetched incident
if last_fetch:
if incident_created_time <= last_fetch:
continue
# If no name is present it will throw an exception
incident_name = alert['name']
# INTEGRATION DEVELOPER TIP
# The incident dict is initialized with a few mandatory fields:
# name: the incident name
# occurred: the time on when the incident occurred, in ISO8601 format
# we use timestamp_to_datestring() from CommonServerPython.py to
# handle the conversion.
# rawJSON: everything else is packed in a string via json.dumps()
# and is included in rawJSON. It will be used later for classification
# and mapping inside XSOAR.
        # severity: it's not mandatory, but is recommended. It must be converted to XSOAR severity (int 1 to 4)
incident = {
'name': incident_name,
'occurred': timestamp_to_datestring(incident_created_time_ms),
'rawJSON': json.dumps(alert),
            'severity': convert_to_demisto_severity(alert.get('severity', 'Low')),
}
incidents.append(incident)
if incident_created_time > latest_created_time:
latest_created_time = incident_created_time
next_run = {'last_fetch': latest_created_time}
return next_run, incidents
def ip_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
ips = argToList(args.get('ip'))
if len(ips) == 0:
raise ValueError('IP(s) not specified')
    # It's a good practice to document the threshold you use to determine
    # if a score is malicious in your integration documentation.
# Thresholds should also be possible to override, as in this case,
# where threshold is an actual argument of the command.
threshold = int(args.get('threshold', default_threshold))
# Initialize an empty list of CommandResults to return
# each CommandResult will contain context standard for IP
command_results: List[CommandResults] = []
for ip in ips:
ip_data = client.get_ip_reputation(ip)
ip_data['ip'] = ip
# HelloWorld score to XSOAR reputation mapping
# See: https://xsoar.pan.dev/docs/integrations/dbot
# We are using Common.DBotScore as macros to simplify
# the mapping.
score = 0
reputation = int(ip_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE # unknown
elif reputation >= threshold:
score = Common.DBotScore.BAD # bad
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS # suspicious
else:
score = Common.DBotScore.GOOD # good
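        # Worked example with the default threshold of 65: a HelloWorld score
        # of 0 maps to Unknown, 80 to Bad, 40 to Suspicious and 20 to Good.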
# The context is bigger here than other commands, as it consists in 3
# parts: the vendor-specific context (HelloWorld), the standard-context
# (IP) and the DBotScore.
# More information:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# https://xsoar.pan.dev/docs/integrations/context-standards
# https://xsoar.pan.dev/docs/integrations/dbot
# Also check the HelloWorld Design Document
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name='HelloWorld',
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
# Create the IP Standard Context structure using Common.IP and add
# dbot_score to it.
ip_standard_context = Common.IP(
ip=ip,
asn=ip_data.get('asn'),
dbot_score=dbot_score
)
# INTEGRATION DEVELOPER TIP
# In the integration specific Context output (HelloWorld.IP) in this
# example you want to provide a lot of information as it can be used
# programmatically from within Cortex XSOAR in playbooks and commands.
        # On the other hand, this API is way too verbose, so we want to select
# only certain keys to be returned in order not to clog the context
# with useless information. What to actually return in the context and
# to define as a command output is subject to design considerations.
# INTEGRATION DEVELOPER TIP
        # To generate the Context Outputs on the YML use ``demisto-sdk``'s
        # ``json-to-outputs`` command.
ip_context_excluded_fields = ['objects', 'nir']
ip_data = {k: ip_data[k] for k in ip_data if k not in ip_context_excluded_fields}
readable_output = tableToMarkdown('IP', ip_data)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.IP',
outputs_key_field='ip',
outputs=ip_data,
indicator=ip_standard_context
))
return command_results
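# Illustrative War Room invocation (argument names taken from the code above,
# the addresses are placeholder examples):
#   !ip ip=8.8.8.8,1.1.1.1 threshold=70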
def domain_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
domains = argToList(args.get('domain'))
if len(domains) == 0:
raise ValueError('domain(s) not specified')
threshold = int(args.get('threshold', default_threshold))
command_results: List[CommandResults] = []
for domain in domains:
domain_data = client.get_domain_reputation(domain)
domain_data['domain'] = domain
if 'creation_date' in domain_data:
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if 'expiration_date' in domain_data:
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if 'updated_date' in domain_data:
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
score = 0
reputation = int(domain_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE
elif reputation >= threshold:
score = Common.DBotScore.BAD
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS
else:
score = Common.DBotScore.GOOD
dbot_score = Common.DBotScore(
indicator=domain,
integration_name='HelloWorld',
indicator_type=DBotScoreType.DOMAIN,
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
domain_standard_context = Common.Domain(
domain=domain,
creation_date=domain_data.get('creation_date', None),
expiration_date=domain_data.get('expiration_date', None),
updated_date=domain_data.get('updated_date', None),
organization=domain_data.get('org', None),
name_servers=domain_data.get('name_servers', None),
registrant_name=domain_data.get('name', None),
registrant_country=domain_data.get('country', None),
registrar_name=domain_data.get('registrar', None),
dbot_score=dbot_score
)
readable_output = tableToMarkdown('Domain', domain_data)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Domain',
outputs_key_field='domain',
outputs=domain_data,
indicator=domain_standard_context
))
return command_results
def search_alerts_command(client: Client, args: Dict[str, Any]) -> CommandResults:
status = args.get('status')
severities: List[str] = HELLOWORLD_SEVERITIES
severity = args.get('severity', None)
if severity:
severities = severity.split(',')
if not all(s in HELLOWORLD_SEVERITIES for s in severities):
raise ValueError(
f'severity must be a comma-separated value '
f'with the following options: {",".join(HELLOWORLD_SEVERITIES)}')
alert_type = args.get('alert_type')
start_time = arg_to_datetime(
arg=args.get('start_time'),
arg_name='start_time',
required=False
)
max_results = arg_to_number(
arg=args.get('max_results'),
arg_name='max_results',
required=False
)
alerts = client.search_alerts(
severity=','.join(severities),
alert_status=status,
alert_type=alert_type,
start_time=int(start_time.timestamp()) if start_time else None,
max_results=max_results
)
for alert in alerts:
if 'created' not in alert:
continue
created_time_ms = int(alert.get('created', '0')) * 1000
alert['created'] = timestamp_to_datestring(created_time_ms)
return CommandResults(
outputs_prefix='HelloWorld.Alert',
outputs_key_field='alert_id',
outputs=alerts
)
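# Illustrative War Room invocation (argument names taken from the code above):
#   !helloworld-search-alerts severity=High,Critical status=ACTIVE max_results=10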
def get_alert_command(client: Client, args: Dict[str, Any]) -> CommandResults:
alert_id = args.get('alert_id', None)
if not alert_id:
raise ValueError('alert_id not specified')
alert = client.get_alert(alert_id=alert_id)
if 'created' in alert:
created_time_ms = int(alert.get('created', '0')) * 1000
alert['created'] = timestamp_to_datestring(created_time_ms)
readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Alert',
outputs_key_field='alert_id',
outputs=alert
)
def update_alert_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
alert_id = args.get('alert_id', None)
if not alert_id:
raise ValueError('alert_id not specified')
status = args.get('status', None)
if status not in ('ACTIVE', 'CLOSED'):
raise ValueError('status must be either ACTIVE or CLOSED')
alert = client.update_alert_status(alert_id, status)
if 'updated' in alert:
updated_time_ms = int(alert.get('updated', '0')) * 1000
alert['updated'] = timestamp_to_datestring(updated_time_ms)
readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Alert',
outputs_key_field='alert_id',
outputs=alert
)
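# Illustrative invocation (the alert_id value is a placeholder):
#   !helloworld-update-alert-status alert_id=<alert_id> status=CLOSED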
def scan_start_command(client: Client, args: Dict[str, Any]) -> CommandResults:
hostname = args.get('hostname', None)
if not hostname:
raise ValueError('hostname not specified')
scan = client.scan_start(hostname=hostname)
    # INTEGRATION DEVELOPER TIP
    # The API response doesn't include the hostname the scan was started for,
    # which is the input. It could be useful to have that information in the
# XSOAR context, so we are adding it manually here, based on the command
# input argument.
scan['hostname'] = hostname
scan_id = scan.get('scan_id')
readable_output = f'Started scan {scan_id}'
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Scan',
outputs_key_field='scan_id',
outputs=scan
)
def scan_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
scan_id_list = argToList(args.get('scan_id', []))
if len(scan_id_list) == 0:
raise ValueError('scan_id(s) not specified')
scan_list: List[Dict[str, Any]] = []
for scan_id in scan_id_list:
scan = client.scan_status(scan_id=scan_id)
scan_list.append(scan)
readable_output = tableToMarkdown('Scan status', scan_list)
return CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Scan',
outputs_key_field='scan_id',
outputs=scan_list
)
def scan_results_command(client: Client, args: Dict[str, Any]) -> Union[Dict[str, Any], CommandResults, List[CommandResults]]:
scan_id = args.get('scan_id', None)
if not scan_id:
raise ValueError('scan_id not specified')
scan_format = args.get('format', 'file')
# INTEGRATION DEVELOPER TIP
# This function supports returning data in multiple formats, either in a json
# format that is then mapped to a table, or as a file attachment.
# In this case, if the format is "file", the return value is different and
# uses a raw format and ``fileResult()`` directly instead of
# ``CommandResults``. In either case you should return data to main and
# call ``return_results()`` from there.
# Always use ``CommandResults`` when possible but, if you need to return
# anything special like a file, you can use this raw format.
results = client.scan_results(scan_id=scan_id)
if scan_format == 'file':
return (
fileResult(
filename=f'{scan_id}.json',
data=json.dumps(results, indent=4),
file_type=entryTypes['entryInfoFile']
)
)
elif scan_format == 'json':
# This scan returns CVE information. CVE is also part of the XSOAR
# context standard, so we must extract CVE IDs and return them also.
# See: https://xsoar.pan.dev/docs/integrations/context-standards#cve
cves: List[Common.CVE] = []
command_results: List[CommandResults] = []
entities = results.get('entities', [])
for e in entities:
if 'vulns' in e.keys() and isinstance(e['vulns'], list):
cves.extend([Common.CVE(id=c, cvss=None, published=None, modified=None, description=None) for c in e['vulns']])
# INTEGRATION DEVELOPER TIP
# We want to provide a unique result for every CVE indicator.
# Since every entity may contain several CVE indicators,
# we will split the entities result and CVE indicator results.
readable_output = tableToMarkdown(f'Scan {scan_id} results', entities)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Scan',
outputs_key_field='scan_id',
outputs=results
))
cves = list(set(cves)) # make the indicator list unique
for cve in cves:
command_results.append(CommandResults(
readable_output=f"CVE {cve}",
indicator=cve
))
return command_results
else:
raise ValueError('Incorrect format, must be "json" or "file"')
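# Illustrative invocation (the scan_id value is a placeholder):
#   !helloworld-scan-results scan_id=<scan_id> format=json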
def main() -> None:
api_key = demisto.params().get('apikey')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/v1')
# if your Client class inherits from BaseClient, SSL verification is
# handled out of the box by it, just pass ``verify_certificate`` to
# the Client constructor
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = arg_to_datetime(
arg=demisto.params().get('first_fetch', '3 days'),
arg_name='First fetch time',
required=True
)
first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
# Using assert as a type guard (since first_fetch_time is always an int when required=True)
assert isinstance(first_fetch_timestamp, int)
# if your Client class inherits from BaseClient, system proxy is handled
# out of the box by it, just pass ``proxy`` to the Client constructor
proxy = demisto.params().get('proxy', False)
# INTEGRATION DEVELOPER TIP
# You can use functions such as ``demisto.debug()``, ``demisto.info()``,
# etc. to print information in the XSOAR server log. You can set the log
# level on the server configuration
# See: https://xsoar.pan.dev/docs/integrations/code-conventions#logging
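    # For example, demisto.debug('fetching alerts') or demisto.info('created 3
    # incidents'); debug-level messages are only written when the configured
    # server log level allows them.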
demisto.debug(f'Command being called is {demisto.command()}')
try:
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, first_fetch_timestamp)
return_results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
alert_status = demisto.params().get('alert_status', None)
alert_type = demisto.params().get('alert_type', None)
min_severity = demisto.params().get('min_severity', None)
# Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
max_results = arg_to_number(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(), # getLastRun() gets the last run dict
first_fetch_time=first_fetch_timestamp,
alert_status=alert_status,
min_severity=min_severity,
alert_type=alert_type
)
            # saves next_run for the next time fetch-incidents is invoked
demisto.setLastRun(next_run)
# fetch-incidents calls ``demisto.incidents()`` to provide the list
            # of incidents to create
demisto.incidents(incidents)
elif demisto.command() == 'ip':
default_threshold_ip = int(demisto.params().get('threshold_ip', '65'))
return_results(ip_reputation_command(client, demisto.args(), default_threshold_ip))
elif demisto.command() == 'domain':
default_threshold_domain = int(demisto.params().get('threshold_domain', '65'))
return_results(domain_reputation_command(client, demisto.args(), default_threshold_domain))
elif demisto.command() == 'helloworld-say-hello':
return_results(say_hello_command(client, demisto.args()))
elif demisto.command() == 'helloworld-search-alerts':
return_results(search_alerts_command(client, demisto.args()))
elif demisto.command() == 'helloworld-get-alert':
return_results(get_alert_command(client, demisto.args()))
elif demisto.command() == 'helloworld-update-alert-status':
return_results(update_alert_status_command(client, demisto.args()))
elif demisto.command() == 'helloworld-scan-start':
return_results(scan_start_command(client, demisto.args()))
elif demisto.command() == 'helloworld-scan-status':
return_results(scan_status_command(client, demisto.args()))
elif demisto.command() == 'helloworld-scan-results':
return_results(scan_results_command(client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| true | true |
7906ee89072b9793cbb86da75beda2abaf56e9cc | 70,565 | py | Python | twosum.py | leocody/Leet-code | 763fd08159527f6c141a31b3b5ea8357d6218b60 | ["MIT"] | null | null | null | twosum.py | leocody/Leet-code | 763fd08159527f6c141a31b3b5ea8357d6218b60 | ["MIT"] | null | null | null | twosum.py | leocody/Leet-code | 763fd08159527f6c141a31b3b5ea8357d6218b60 | ["MIT"] | null | null | null |
from typing import List


def twoSum(nums: List[int], target: int) -> List[int]:
    # Brute-force two-sum: for every element, scan the rest of the list for a
    # partner that sums to target and return the pair of indices.
    # Runs in O(n^2) time and O(1) extra space.
    length = len(nums)
    for i, v1 in enumerate(nums):
        sliced = nums[i + 1: length]
        for j, v2 in enumerate(sliced):
            result = v1 + v2
            if result == target:
                # j is relative to the slice, so shift it back to an index in nums
                return [i, i + j + 1]
    # No pair sums to target
    return []


result = twoSum([6, 1, 4, 5], 7)
assert result == [0, 1]
result2 = twoSum([2, 8, 4, 5], 13)
assert result2 == [1, 3]
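
# A minimal alternative sketch (hypothetical twoSumHashed helper, not used by
# the tests above): the same problem solved in O(n) time with a dict mapping
# each value already seen to its index.
def twoSumHashed(nums: List[int], target: int) -> List[int]:
    seen = {}  # value -> index where it was first seen
    for i, v in enumerate(nums):
        complement = target - v
        if complement in seen:
            return [seen[complement], i]
        seen[v] = i
    return []


assert twoSumHashed([6, 1, 4, 5], 7) == [0, 1]
assert twoSumHashed([2, 8, 4, 5], 13) == [1, 3]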
result3 = twoSum(
[0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110,112,114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,146,148,150,152,154,156,158,160,162,164,166,168,170,172,174,176,178,180,182,184,186,188,190,192,194,196,198,200,202,204,206,208,210,212,214,216,218,220,222,224,226,228,230,232,234,236,238,240,242,244,246,248,250,252,254,256,258,260,262,264,266,268,270,272,274,276,278,280,282,284,286,288,290,292,294,296,298,300,302,304,306,308,310,312,314,316,318,320,322,324,326,328,330,332,334,336,338,340,342,344,346,348,350,352,354,356,358,360,362,364,366,368,370,372,374,376,378,380,382,384,386,388,390,392,394,396,398,400,402,404,406,408,410,412,414,416,418,420,422,424,426,428,430,432,434,436,438,440,442,444,446,448,450,452,454,456,458,460,462,464,466,468,470,472,474,476,478,480,482,484,486,488,490,492,494,496,498,500,502,504,506,508,510,512,514,516,518,520,522,524,526,528,530,532,534,536,538,540,542,544,546,548,550,552,554,556,558,560,562,564,566,568,570,572,574,576,578,580,582,584,586,588,590,592,594,596,598,600,602,604,606,608,610,612,614,616,618,620,622,624,626,628,630,632,634,636,638,640,642,644,646,648,650,652,654,656,658,660,662,664,666,668,670,672,674,676,678,680,682,684,686,688,690,692,694,696,698,700,702,704,706,708,710,712,714,716,718,720,722,724,726,728,730,732,734,736,738,740,742,744,746,748,750,752,754,756,758,760,762,764,766,768,770,772,774,776,778,780,782,784,786,788,790,792,794,796,798,800,802,804,806,808,810,812,814,816,818,820,822,824,826,828,830,832,834,836,838,840,842,844,846,848,850,852,854,856,858,860,862,864,866,868,870,872,874,876,878,880,882,884,886,888,890,892,894,896,898,900,902,904,906,908,910,912,914,916,918,920,922,924,926,928,930,932,934,936,938,940,942,944,946,948,950,952,954,956,958,960,962,964,966,968,970,972,974,976,978,980,982,984,986,988,990,992,994,996,998,1000,1002,1004,1006,1008,1010,1012,1014,1016,1018,1020,1022,1024,1026,1028,1030,1032,1034,1036,1038,1040,1042,1044,1046,1048,1050,1052,1054,1056,1058,1060,1062,1064,1066,1068,1070,1072,1074,1076,1078,1080,1082,1084,1086,1088,1090,1092,1094,1096,1098,1100,1102,1104,1106,1108,1110,1112,1114,1116,1118,1120,1122,1124,1126,1128,1130,1132,1134,1136,1138,1140,1142,1144,1146,1148,1150,1152,1154,1156,1158,1160,1162,1164,1166,1168,1170,1172,1174,1176,1178,1180,1182,1184,1186,1188,1190,1192,1194,1196,1198,1200,1202,1204,1206,1208,1210,1212,1214,1216,1218,1220,1222,1224,1226,1228,1230,1232,1234,1236,1238,1240,1242,1244,1246,1248,1250,1252,1254,1256,1258,1260,1262,1264,1266,1268,1270,1272,1274,1276,1278,1280,1282,1284,1286,1288,1290,1292,1294,1296,1298,1300,1302,1304,1306,1308,1310,1312,1314,1316,1318,1320,1322,1324,1326,1328,1330,1332,1334,1336,1338,1340,1342,1344,1346,1348,1350,1352,1354,1356,1358,1360,1362,1364,1366,1368,1370,1372,1374,1376,1378,1380,1382,1384,1386,1388,1390,1392,1394,1396,1398,1400,1402,1404,1406,1408,1410,1412,1414,1416,1418,1420,1422,1424,1426,1428,1430,1432,1434,1436,1438,1440,1442,1444,1446,1448,1450,1452,1454,1456,1458,1460,1462,1464,1466,1468,1470,1472,1474,1476,1478,1480,1482,1484,1486,1488,1490,1492,1494,1496,1498,1500,1502,1504,1506,1508,1510,1512,1514,1516,1518,1520,1522,1524,1526,1528,1530,1532,1534,1536,1538,1540,1542,1544,1546,1548,1550,1552,1554,1556,1558,1560,1562,1564,1566,1568,1570,1572,1574,1576,1578,1580,1582,1584,1586,1588,1590,1592,1594,1596,1598,1600,1602,1604,1606,1608,1610,1612,1614,1616,1618,1620,1622,1624,1626,1628,1630,1632,1634,1636,1638,1640,1642
,1644,1646,1648,1650,1652,1654,1656,1658,1660,1662,1664,1666,1668,1670,1672,1674,1676,1678,1680,1682,1684,1686,1688,1690,1692,1694,1696,1698,1700,1702,1704,1706,1708,1710,1712,1714,1716,1718,1720,1722,1724,1726,1728,1730,1732,1734,1736,1738,1740,1742,1744,1746,1748,1750,1752,1754,1756,1758,1760,1762,1764,1766,1768,1770,1772,1774,1776,1778,1780,1782,1784,1786,1788,1790,1792,1794,1796,1798,1800,1802,1804,1806,1808,1810,1812,1814,1816,1818,1820,1822,1824,1826,1828,1830,1832,1834,1836,1838,1840,1842,1844,1846,1848,1850,1852,1854,1856,1858,1860,1862,1864,1866,1868,1870,1872,1874,1876,1878,1880,1882,1884,1886,1888,1890,1892,1894,1896,1898,1900,1902,1904,1906,1908,1910,1912,1914,1916,1918,1920,1922,1924,1926,1928,1930,1932,1934,1936,1938,1940,1942,1944,1946,1948,1950,1952,1954,1956,1958,1960,1962,1964,1966,1968,1970,1972,1974,1976,1978,1980,1982,1984,1986,1988,1990,1992,1994,1996,1998,2000,2002,2004,2006,2008,2010,2012,2014,2016,2018,2020,2022,2024,2026,2028,2030,2032,2034,2036,2038,2040,2042,2044,2046,2048,2050,2052,2054,2056,2058,2060,2062,2064,2066,2068,2070,2072,2074,2076,2078,2080,2082,2084,2086,2088,2090,2092,2094,2096,2098,2100,2102,2104,2106,2108,2110,2112,2114,2116,2118,2120,2122,2124,2126,2128,2130,2132,2134,2136,2138,2140,2142,2144,2146,2148,2150,2152,2154,2156,2158,2160,2162,2164,2166,2168,2170,2172,2174,2176,2178,2180,2182,2184,2186,2188,2190,2192,2194,2196,2198,2200,2202,2204,2206,2208,2210,2212,2214,2216,2218,2220,2222,2224,2226,2228,2230,2232,2234,2236,2238,2240,2242,2244,2246,2248,2250,2252,2254,2256,2258,2260,2262,2264,2266,2268,2270,2272,2274,2276,2278,2280,2282,2284,2286,2288,2290,2292,2294,2296,2298,2300,2302,2304,2306,2308,2310,2312,2314,2316,2318,2320,2322,2324,2326,2328,2330,2332,2334,2336,2338,2340,2342,2344,2346,2348,2350,2352,2354,2356,2358,2360,2362,2364,2366,2368,2370,2372,2374,2376,2378,2380,2382,2384,2386,2388,2390,2392,2394,2396,2398,2400,2402,2404,2406,2408,2410,2412,2414,2416,2418,2420,2422,2424,2426,2428,2430,2432,2434,2436,2438,2440,2442,2444,2446,2448,2450,2452,2454,2456,2458,2460,2462,2464,2466,2468,2470,2472,2474,2476,2478,2480,2482,2484,2486,2488,2490,2492,2494,2496,2498,2500,2502,2504,2506,2508,2510,2512,2514,2516,2518,2520,2522,2524,2526,2528,2530,2532,2534,2536,2538,2540,2542,2544,2546,2548,2550,2552,2554,2556,2558,2560,2562,2564,2566,2568,2570,2572,2574,2576,2578,2580,2582,2584,2586,2588,2590,2592,2594,2596,2598,2600,2602,2604,2606,2608,2610,2612,2614,2616,2618,2620,2622,2624,2626,2628,2630,2632,2634,2636,2638,2640,2642,2644,2646,2648,2650,2652,2654,2656,2658,2660,2662,2664,2666,2668,2670,2672,2674,2676,2678,2680,2682,2684,2686,2688,2690,2692,2694,2696,2698,2700,2702,2704,2706,2708,2710,2712,2714,2716,2718,2720,2722,2724,2726,2728,2730,2732,2734,2736,2738,2740,2742,2744,2746,2748,2750,2752,2754,2756,2758,2760,2762,2764,2766,2768,2770,2772,2774,2776,2778,2780,2782,2784,2786,2788,2790,2792,2794,2796,2798,2800,2802,2804,2806,2808,2810,2812,2814,2816,2818,2820,2822,2824,2826,2828,2830,2832,2834,2836,2838,2840,2842,2844,2846,2848,2850,2852,2854,2856,2858,2860,2862,2864,2866,2868,2870,2872,2874,2876,2878,2880,2882,2884,2886,2888,2890,2892,2894,2896,2898,2900,2902,2904,2906,2908,2910,2912,2914,2916,2918,2920,2922,2924,2926,2928,2930,2932,2934,2936,2938,2940,2942,2944,2946,2948,2950,2952,2954,2956,2958,2960,2962,2964,2966,2968,2970,2972,2974,2976,2978,2980,2982,2984,2986,2988,2990,2992,2994,2996,2998,3000,3002,3004,3006,3008,3010,3012,3014,3016,3018,3020,3022,3024,3026,3028,3030,3032,3034,3036,3038,3040,3042,3044,3046,3048,3050,3052,3054,3056,3058,3060,3062,3064
,3066,3068,3070,3072,3074,3076,3078,3080,3082,3084,3086,3088,3090,3092,3094,3096,3098,3100,3102,3104,3106,3108,3110,3112,3114,3116,3118,3120,3122,3124,3126,3128,3130,3132,3134,3136,3138,3140,3142,3144,3146,3148,3150,3152,3154,3156,3158,3160,3162,3164,3166,3168,3170,3172,3174,3176,3178,3180,3182,3184,3186,3188,3190,3192,3194,3196,3198,3200,3202,3204,3206,3208,3210,3212,3214,3216,3218,3220,3222,3224,3226,3228,3230,3232,3234,3236,3238,3240,3242,3244,3246,3248,3250,3252,3254,3256,3258,3260,3262,3264,3266,3268,3270,3272,3274,3276,3278,3280,3282,3284,3286,3288,3290,3292,3294,3296,3298,3300,3302,3304,3306,3308,3310,3312,3314,3316,3318,3320,3322,3324,3326,3328,3330,3332,3334,3336,3338,3340,3342,3344,3346,3348,3350,3352,3354,3356,3358,3360,3362,3364,3366,3368,3370,3372,3374,3376,3378,3380,3382,3384,3386,3388,3390,3392,3394,3396,3398,3400,3402,3404,3406,3408,3410,3412,3414,3416,3418,3420,3422,3424,3426,3428,3430,3432,3434,3436,3438,3440,3442,3444,3446,3448,3450,3452,3454,3456,3458,3460,3462,3464,3466,3468,3470,3472,3474,3476,3478,3480,3482,3484,3486,3488,3490,3492,3494,3496,3498,3500,3502,3504,3506,3508,3510,3512,3514,3516,3518,3520,3522,3524,3526,3528,3530,3532,3534,3536,3538,3540,3542,3544,3546,3548,3550,3552,3554,3556,3558,3560,3562,3564,3566,3568,3570,3572,3574,3576,3578,3580,3582,3584,3586,3588,3590,3592,3594,3596,3598,3600,3602,3604,3606,3608,3610,3612,3614,3616,3618,3620,3622,3624,3626,3628,3630,3632,3634,3636,3638,3640,3642,3644,3646,3648,3650,3652,3654,3656,3658,3660,3662,3664,3666,3668,3670,3672,3674,3676,3678,3680,3682,3684,3686,3688,3690,3692,3694,3696,3698,3700,3702,3704,3706,3708,3710,3712,3714,3716,3718,3720,3722,3724,3726,3728,3730,3732,3734,3736,3738,3740,3742,3744,3746,3748,3750,3752,3754,3756,3758,3760,3762,3764,3766,3768,3770,3772,3774,3776,3778,3780,3782,3784,3786,3788,3790,3792,3794,3796,3798,3800,3802,3804,3806,3808,3810,3812,3814,3816,3818,3820,3822,3824,3826,3828,3830,3832,3834,3836,3838,3840,3842,3844,3846,3848,3850,3852,3854,3856,3858,3860,3862,3864,3866,3868,3870,3872,3874,3876,3878,3880,3882,3884,3886,3888,3890,3892,3894,3896,3898,3900,3902,3904,3906,3908,3910,3912,3914,3916,3918,3920,3922,3924,3926,3928,3930,3932,3934,3936,3938,3940,3942,3944,3946,3948,3950,3952,3954,3956,3958,3960,3962,3964,3966,3968,3970,3972,3974,3976,3978,3980,3982,3984,3986,3988,3990,3992,3994,3996,3998,4000,4002,4004,4006,4008,4010,4012,4014,4016,4018,4020,4022,4024,4026,4028,4030,4032,4034,4036,4038,4040,4042,4044,4046,4048,4050,4052,4054,4056,4058,4060,4062,4064,4066,4068,4070,4072,4074,4076,4078,4080,4082,4084,4086,4088,4090,4092,4094,4096,4098,4100,4102,4104,4106,4108,4110,4112,4114,4116,4118,4120,4122,4124,4126,4128,4130,4132,4134,4136,4138,4140,4142,4144,4146,4148,4150,4152,4154,4156,4158,4160,4162,4164,4166,4168,4170,4172,4174,4176,4178,4180,4182,4184,4186,4188,4190,4192,4194,4196,4198,4200,4202,4204,4206,4208,4210,4212,4214,4216,4218,4220,4222,4224,4226,4228,4230,4232,4234,4236,4238,4240,4242,4244,4246,4248,4250,4252,4254,4256,4258,4260,4262,4264,4266,4268,4270,4272,4274,4276,4278,4280,4282,4284,4286,4288,4290,4292,4294,4296,4298,4300,4302,4304,4306,4308,4310,4312,4314,4316,4318,4320,4322,4324,4326,4328,4330,4332,4334,4336,4338,4340,4342,4344,4346,4348,4350,4352,4354,4356,4358,4360,4362,4364,4366,4368,4370,4372,4374,4376,4378,4380,4382,4384,4386,4388,4390,4392,4394,4396,4398,4400,4402,4404,4406,4408,4410,4412,4414,4416,4418,4420,4422,4424,4426,4428,4430,4432,4434,4436,4438,4440,4442,4444,4446,4448,4450,4452,4454,4456,4458,4460,4462,4464,4466,4468,4470,4472,4474,4476,4478,4480,4482,4484,4486
,4488,4490,4492,4494,4496,4498,4500,4502,4504,4506,4508,4510,4512,4514,4516,4518,4520,4522,4524,4526,4528,4530,4532,4534,4536,4538,4540,4542,4544,4546,4548,4550,4552,4554,4556,4558,4560,4562,4564,4566,4568,4570,4572,4574,4576,4578,4580,4582,4584,4586,4588,4590,4592,4594,4596,4598,4600,4602,4604,4606,4608,4610,4612,4614,4616,4618,4620,4622,4624,4626,4628,4630,4632,4634,4636,4638,4640,4642,4644,4646,4648,4650,4652,4654,4656,4658,4660,4662,4664,4666,4668,4670,4672,4674,4676,4678,4680,4682,4684,4686,4688,4690,4692,4694,4696,4698,4700,4702,4704,4706,4708,4710,4712,4714,4716,4718,4720,4722,4724,4726,4728,4730,4732,4734,4736,4738,4740,4742,4744,4746,4748,4750,4752,4754,4756,4758,4760,4762,4764,4766,4768,4770,4772,4774,4776,4778,4780,4782,4784,4786,4788,4790,4792,4794,4796,4798,4800,4802,4804,4806,4808,4810,4812,4814,4816,4818,4820,4822,4824,4826,4828,4830,4832,4834,4836,4838,4840,4842,4844,4846,4848,4850,4852,4854,4856,4858,4860,4862,4864,4866,4868,4870,4872,4874,4876,4878,4880,4882,4884,4886,4888,4890,4892,4894,4896,4898,4900,4902,4904,4906,4908,4910,4912,4914,4916,4918,4920,4922,4924,4926,4928,4930,4932,4934,4936,4938,4940,4942,4944,4946,4948,4950,4952,4954,4956,4958,4960,4962,4964,4966,4968,4970,4972,4974,4976,4978,4980,4982,4984,4986,4988,4990,4992,4994,4996,4998,5000,5002,5004,5006,5008,5010,5012,5014,5016,5018,5020,5022,5024,5026,5028,5030,5032,5034,5036,5038,5040,5042,5044,5046,5048,5050,5052,5054,5056,5058,5060,5062,5064,5066,5068,5070,5072,5074,5076,5078,5080,5082,5084,5086,5088,5090,5092,5094,5096,5098,5100,5102,5104,5106,5108,5110,5112,5114,5116,5118,5120,5122,5124,5126,5128,5130,5132,5134,5136,5138,5140,5142,5144,5146,5148,5150,5152,5154,5156,5158,5160,5162,5164,5166,5168,5170,5172,5174,5176,5178,5180,5182,5184,5186,5188,5190,5192,5194,5196,5198,5200,5202,5204,5206,5208,5210,5212,5214,5216,5218,5220,5222,5224,5226,5228,5230,5232,5234,5236,5238,5240,5242,5244,5246,5248,5250,5252,5254,5256,5258,5260,5262,5264,5266,5268,5270,5272,5274,5276,5278,5280,5282,5284,5286,5288,5290,5292,5294,5296,5298,5300,5302,5304,5306,5308,5310,5312,5314,5316,5318,5320,5322,5324,5326,5328,5330,5332,5334,5336,5338,5340,5342,5344,5346,5348,5350,5352,5354,5356,5358,5360,5362,5364,5366,5368,5370,5372,5374,5376,5378,5380,5382,5384,5386,5388,5390,5392,5394,5396,5398,5400,5402,5404,5406,5408,5410,5412,5414,5416,5418,5420,5422,5424,5426,5428,5430,5432,5434,5436,5438,5440,5442,5444,5446,5448,5450,5452,5454,5456,5458,5460,5462,5464,5466,5468,5470,5472,5474,5476,5478,5480,5482,5484,5486,5488,5490,5492,5494,5496,5498,5500,5502,5504,5506,5508,5510,5512,5514,5516,5518,5520,5522,5524,5526,5528,5530,5532,5534,5536,5538,5540,5542,5544,5546,5548,5550,5552,5554,5556,5558,5560,5562,5564,5566,5568,5570,5572,5574,5576,5578,5580,5582,5584,5586,5588,5590,5592,5594,5596,5598,5600,5602,5604,5606,5608,5610,5612,5614,5616,5618,5620,5622,5624,5626,5628,5630,5632,5634,5636,5638,5640,5642,5644,5646,5648,5650,5652,5654,5656,5658,5660,5662,5664,5666,5668,5670,5672,5674,5676,5678,5680,5682,5684,5686,5688,5690,5692,5694,5696,5698,5700,5702,5704,5706,5708,5710,5712,5714,5716,5718,5720,5722,5724,5726,5728,5730,5732,5734,5736,5738,5740,5742,5744,5746,5748,5750,5752,5754,5756,5758,5760,5762,5764,5766,5768,5770,5772,5774,5776,5778,5780,5782,5784,5786,5788,5790,5792,5794,5796,5798,5800,5802,5804,5806,5808,5810,5812,5814,5816,5818,5820,5822,5824,5826,5828,5830,5832,5834,5836,5838,5840,5842,5844,5846,5848,5850,5852,5854,5856,5858,5860,5862,5864,5866,5868,5870,5872,5874,5876,5878,5880,5882,5884,5886,5888,5890,5892,5894,5896,5898,5900,5902,5904,5906,5908
,5910,5912,5914,5916,5918,5920,5922,5924,5926,5928,5930,5932,5934,5936,5938,5940,5942,5944,5946,5948,5950,5952,5954,5956,5958,5960,5962,5964,5966,5968,5970,5972,5974,5976,5978,5980,5982,5984,5986,5988,5990,5992,5994,5996,5998,6000,6002,6004,6006,6008,6010,6012,6014,6016,6018,6020,6022,6024,6026,6028,6030,6032,6034,6036,6038,6040,6042,6044,6046,6048,6050,6052,6054,6056,6058,6060,6062,6064,6066,6068,6070,6072,6074,6076,6078,6080,6082,6084,6086,6088,6090,6092,6094,6096,6098,6100,6102,6104,6106,6108,6110,6112,6114,6116,6118,6120,6122,6124,6126,6128,6130,6132,6134,6136,6138,6140,6142,6144,6146,6148,6150,6152,6154,6156,6158,6160,6162,6164,6166,6168,6170,6172,6174,6176,6178,6180,6182,6184,6186,6188,6190,6192,6194,6196,6198,6200,6202,6204,6206,6208,6210,6212,6214,6216,6218,6220,6222,6224,6226,6228,6230,6232,6234,6236,6238,6240,6242,6244,6246,6248,6250,6252,6254,6256,6258,6260,6262,6264,6266,6268,6270,6272,6274,6276,6278,6280,6282,6284,6286,6288,6290,6292,6294,6296,6298,6300,6302,6304,6306,6308,6310,6312,6314,6316,6318,6320,6322,6324,6326,6328,6330,6332,6334,6336,6338,6340,6342,6344,6346,6348,6350,6352,6354,6356,6358,6360,6362,6364,6366,6368,6370,6372,6374,6376,6378,6380,6382,6384,6386,6388,6390,6392,6394,6396,6398,6400,6402,6404,6406,6408,6410,6412,6414,6416,6418,6420,6422,6424,6426,6428,6430,6432,6434,6436,6438,6440,6442,6444,6446,6448,6450,6452,6454,6456,6458,6460,6462,6464,6466,6468,6470,6472,6474,6476,6478,6480,6482,6484,6486,6488,6490,6492,6494,6496,6498,6500,6502,6504,6506,6508,6510,6512,6514,6516,6518,6520,6522,6524,6526,6528,6530,6532,6534,6536,6538,6540,6542,6544,6546,6548,6550,6552,6554,6556,6558,6560,6562,6564,6566,6568,6570,6572,6574,6576,6578,6580,6582,6584,6586,6588,6590,6592,6594,6596,6598,6600,6602,6604,6606,6608,6610,6612,6614,6616,6618,6620,6622,6624,6626,6628,6630,6632,6634,6636,6638,6640,6642,6644,6646,6648,6650,6652,6654,6656,6658,6660,6662,6664,6666,6668,6670,6672,6674,6676,6678,6680,6682,6684,6686,6688,6690,6692,6694,6696,6698,6700,6702,6704,6706,6708,6710,6712,6714,6716,6718,6720,6722,6724,6726,6728,6730,6732,6734,6736,6738,6740,6742,6744,6746,6748,6750,6752,6754,6756,6758,6760,6762,6764,6766,6768,6770,6772,6774,6776,6778,6780,6782,6784,6786,6788,6790,6792,6794,6796,6798,6800,6802,6804,6806,6808,6810,6812,6814,6816,6818,6820,6822,6824,6826,6828,6830,6832,6834,6836,6838,6840,6842,6844,6846,6848,6850,6852,6854,6856,6858,6860,6862,6864,6866,6868,6870,6872,6874,6876,6878,6880,6882,6884,6886,6888,6890,6892,6894,6896,6898,6900,6902,6904,6906,6908,6910,6912,6914,6916,6918,6920,6922,6924,6926,6928,6930,6932,6934,6936,6938,6940,6942,6944,6946,6948,6950,6952,6954,6956,6958,6960,6962,6964,6966,6968,6970,6972,6974,6976,6978,6980,6982,6984,6986,6988,6990,6992,6994,6996,6998,7000,7002,7004,7006,7008,7010,7012,7014,7016,7018,7020,7022,7024,7026,7028,7030,7032,7034,7036,7038,7040,7042,7044,7046,7048,7050,7052,7054,7056,7058,7060,7062,7064,7066,7068,7070,7072,7074,7076,7078,7080,7082,7084,7086,7088,7090,7092,7094,7096,7098,7100,7102,7104,7106,7108,7110,7112,7114,7116,7118,7120,7122,7124,7126,7128,7130,7132,7134,7136,7138,7140,7142,7144,7146,7148,7150,7152,7154,7156,7158,7160,7162,7164,7166,7168,7170,7172,7174,7176,7178,7180,7182,7184,7186,7188,7190,7192,7194,7196,7198,7200,7202,7204,7206,7208,7210,7212,7214,7216,7218,7220,7222,7224,7226,7228,7230,7232,7234,7236,7238,7240,7242,7244,7246,7248,7250,7252,7254,7256,7258,7260,7262,7264,7266,7268,7270,7272,7274,7276,7278,7280,7282,7284,7286,7288,7290,7292,7294,7296,7298,7300,7302,7304,7306,7308,7310,7312,7314,7316,7318,7320,7322,7324,7326,7328,7330
,7332,7334,7336,7338,7340,7342,7344,7346,7348,7350,7352,7354,7356,7358,7360,7362,7364,7366,7368,7370,7372,7374,7376,7378,7380,7382,7384,7386,7388,7390,7392,7394,7396,7398,7400,7402,7404,7406,7408,7410,7412,7414,7416,7418,7420,7422,7424,7426,7428,7430,7432,7434,7436,7438,7440,7442,7444,7446,7448,7450,7452,7454,7456,7458,7460,7462,7464,7466,7468,7470,7472,7474,7476,7478,7480,7482,7484,7486,7488,7490,7492,7494,7496,7498,7500,7502,7504,7506,7508,7510,7512,7514,7516,7518,7520,7522,7524,7526,7528,7530,7532,7534,7536,7538,7540,7542,7544,7546,7548,7550,7552,7554,7556,7558,7560,7562,7564,7566,7568,7570,7572,7574,7576,7578,7580,7582,7584,7586,7588,7590,7592,7594,7596,7598,7600,7602,7604,7606,7608,7610,7612,7614,7616,7618,7620,7622,7624,7626,7628,7630,7632,7634,7636,7638,7640,7642,7644,7646,7648,7650,7652,7654,7656,7658,7660,7662,7664,7666,7668,7670,7672,7674,7676,7678,7680,7682,7684,7686,7688,7690,7692,7694,7696,7698,7700,7702,7704,7706,7708,7710,7712,7714,7716,7718,7720,7722,7724,7726,7728,7730,7732,7734,7736,7738,7740,7742,7744,7746,7748,7750,7752,7754,7756,7758,7760,7762,7764,7766,7768,7770,7772,7774,7776,7778,7780,7782,7784,7786,7788,7790,7792,7794,7796,7798,7800,7802,7804,7806,7808,7810,7812,7814,7816,7818,7820,7822,7824,7826,7828,7830,7832,7834,7836,7838,7840,7842,7844,7846,7848,7850,7852,7854,7856,7858,7860,7862,7864,7866,7868,7870,7872,7874,7876,7878,7880,7882,7884,7886,7888,7890,7892,7894,7896,7898,7900,7902,7904,7906,7908,7910,7912,7914,7916,7918,7920,7922,7924,7926,7928,7930,7932,7934,7936,7938,7940,7942,7944,7946,7948,7950,7952,7954,7956,7958,7960,7962,7964,7966,7968,7970,7972,7974,7976,7978,7980,7982,7984,7986,7988,7990,7992,7994,7996,7998,8000,8002,8004,8006,8008,8010,8012,8014,8016,8018,8020,8022,8024,8026,8028,8030,8032,8034,8036,8038,8040,8042,8044,8046,8048,8050,8052,8054,8056,8058,8060,8062,8064,8066,8068,8070,8072,8074,8076,8078,8080,8082,8084,8086,8088,8090,8092,8094,8096,8098,8100,8102,8104,8106,8108,8110,8112,8114,8116,8118,8120,8122,8124,8126,8128,8130,8132,8134,8136,8138,8140,8142,8144,8146,8148,8150,8152,8154,8156,8158,8160,8162,8164,8166,8168,8170,8172,8174,8176,8178,8180,8182,8184,8186,8188,8190,8192,8194,8196,8198,8200,8202,8204,8206,8208,8210,8212,8214,8216,8218,8220,8222,8224,8226,8228,8230,8232,8234,8236,8238,8240,8242,8244,8246,8248,8250,8252,8254,8256,8258,8260,8262,8264,8266,8268,8270,8272,8274,8276,8278,8280,8282,8284,8286,8288,8290,8292,8294,8296,8298,8300,8302,8304,8306,8308,8310,8312,8314,8316,8318,8320,8322,8324,8326,8328,8330,8332,8334,8336,8338,8340,8342,8344,8346,8348,8350,8352,8354,8356,8358,8360,8362,8364,8366,8368,8370,8372,8374,8376,8378,8380,8382,8384,8386,8388,8390,8392,8394,8396,8398,8400,8402,8404,8406,8408,8410,8412,8414,8416,8418,8420,8422,8424,8426,8428,8430,8432,8434,8436,8438,8440,8442,8444,8446,8448,8450,8452,8454,8456,8458,8460,8462,8464,8466,8468,8470,8472,8474,8476,8478,8480,8482,8484,8486,8488,8490,8492,8494,8496,8498,8500,8502,8504,8506,8508,8510,8512,8514,8516,8518,8520,8522,8524,8526,8528,8530,8532,8534,8536,8538,8540,8542,8544,8546,8548,8550,8552,8554,8556,8558,8560,8562,8564,8566,8568,8570,8572,8574,8576,8578,8580,8582,8584,8586,8588,8590,8592,8594,8596,8598,8600,8602,8604,8606,8608,8610,8612,8614,8616,8618,8620,8622,8624,8626,8628,8630,8632,8634,8636,8638,8640,8642,8644,8646,8648,8650,8652,8654,8656,8658,8660,8662,8664,8666,8668,8670,8672,8674,8676,8678,8680,8682,8684,8686,8688,8690,8692,8694,8696,8698,8700,8702,8704,8706,8708,8710,8712,8714,8716,8718,8720,8722,8724,8726,8728,8730,8732,8734,8736,8738,8740,8742,8744,8746,8748,8750,8752
,8754,8756,8758,8760,8762,8764,8766,8768,8770,8772,8774,8776,8778,8780,8782,8784,8786,8788,8790,8792,8794,8796,8798,8800,8802,8804,8806,8808,8810,8812,8814,8816,8818,8820,8822,8824,8826,8828,8830,8832,8834,8836,8838,8840,8842,8844,8846,8848,8850,8852,8854,8856,8858,8860,8862,8864,8866,8868,8870,8872,8874,8876,8878,8880,8882,8884,8886,8888,8890,8892,8894,8896,8898,8900,8902,8904,8906,8908,8910,8912,8914,8916,8918,8920,8922,8924,8926,8928,8930,8932,8934,8936,8938,8940,8942,8944,8946,8948,8950,8952,8954,8956,8958,8960,8962,8964,8966,8968,8970,8972,8974,8976,8978,8980,8982,8984,8986,8988,8990,8992,8994,8996,8998,9000,9002,9004,9006,9008,9010,9012,9014,9016,9018,9020,9022,9024,9026,9028,9030,9032,9034,9036,9038,9040,9042,9044,9046,9048,9050,9052,9054,9056,9058,9060,9062,9064,9066,9068,9070,9072,9074,9076,9078,9080,9082,9084,9086,9088,9090,9092,9094,9096,9098,9100,9102,9104,9106,9108,9110,9112,9114,9116,9118,9120,9122,9124,9126,9128,9130,9132,9134,9136,9138,9140,9142,9144,9146,9148,9150,9152,9154,9156,9158,9160,9162,9164,9166,9168,9170,9172,9174,9176,9178,9180,9182,9184,9186,9188,9190,9192,9194,9196,9198,9200,9202,9204,9206,9208,9210,9212,9214,9216,9218,9220,9222,9224,9226,9228,9230,9232,9234,9236,9238,9240,9242,9244,9246,9248,9250,9252,9254,9256,9258,9260,9262,9264,9266,9268,9270,9272,9274,9276,9278,9280,9282,9284,9286,9288,9290,9292,9294,9296,9298,9300,9302,9304,9306,9308,9310,9312,9314,9316,9318,9320,9322,9324,9326,9328,9330,9332,9334,9336,9338,9340,9342,9344,9346,9348,9350,9352,9354,9356,9358,9360,9362,9364,9366,9368,9370,9372,9374,9376,9378,9380,9382,9384,9386,9388,9390,9392,9394,9396,9398,9400,9402,9404,9406,9408,9410,9412,9414,9416,9418,9420,9422,9424,9426,9428,9430,9432,9434,9436,9438,9440,9442,9444,9446,9448,9450,9452,9454,9456,9458,9460,9462,9464,9466,9468,9470,9472,9474,9476,9478,9480,9482,9484,9486,9488,9490,9492,9494,9496,9498,9500,9502,9504,9506,9508,9510,9512,9514,9516,9518,9520,9522,9524,9526,9528,9530,9532,9534,9536,9538,9540,9542,9544,9546,9548,9550,9552,9554,9556,9558,9560,9562,9564,9566,9568,9570,9572,9574,9576,9578,9580,9582,9584,9586,9588,9590,9592,9594,9596,9598,9600,9602,9604,9606,9608,9610,9612,9614,9616,9618,9620,9622,9624,9626,9628,9630,9632,9634,9636,9638,9640,9642,9644,9646,9648,9650,9652,9654,9656,9658,9660,9662,9664,9666,9668,9670,9672,9674,9676,9678,9680,9682,9684,9686,9688,9690,9692,9694,9696,9698,9700,9702,9704,9706,9708,9710,9712,9714,9716,9718,9720,9722,9724,9726,9728,9730,9732,9734,9736,9738,9740,9742,9744,9746,9748,9750,9752,9754,9756,9758,9760,9762,9764,9766,9768,9770,9772,9774,9776,9778,9780,9782,9784,9786,9788,9790,9792,9794,9796,9798,9800,9802,9804,9806,9808,9810,9812,9814,9816,9818,9820,9822,9824,9826,9828,9830,9832,9834,9836,9838,9840,9842,9844,9846,9848,9850,9852,9854,9856,9858,9860,9862,9864,9866,9868,9870,9872,9874,9876,9878,9880,9882,9884,9886,9888,9890,9892,9894,9896,9898,9900,9902,9904,9906,9908,9910,9912,9914,9916,9918,9920,9922,9924,9926,9928,9930,9932,9934,9936,9938,9940,9942,9944,9946,9948,9950,9952,9954,9956,9958,9960,9962,9964,9966,9968,9970,9972,9974,9976,9978,9980,9982,9984,9986,9988,9990,9992,9994,9996,9998,10000,10002,10004,10006,10008,10010,10012,10014,10016,10018,10020,10022,10024,10026,10028,10030,10032,10034,10036,10038,10040,10042,10044,10046,10048,10050,10052,10054,10056,10058,10060,10062,10064,10066,10068,10070,10072,10074,10076,10078,10080,10082,10084,10086,10088,10090,10092,10094,10096,10098,10100,10102,10104,10106,10108,10110,10112,10114,10116,10118,10120,10122,10124,10126,10128,10130,10132,10134,10136,10138,10140,10142,10144,1
0146,10148,10150,10152,10154,10156,10158,10160,10162,10164,10166,10168,10170,10172,10174,10176,10178,10180,10182,10184,10186,10188,10190,10192,10194,10196,10198,10200,10202,10204,10206,10208,10210,10212,10214,10216,10218,10220,10222,10224,10226,10228,10230,10232,10234,10236,10238,10240,10242,10244,10246,10248,10250,10252,10254,10256,10258,10260,10262,10264,10266,10268,10270,10272,10274,10276,10278,10280,10282,10284,10286,10288,10290,10292,10294,10296,10298,10300,10302,10304,10306,10308,10310,10312,10314,10316,10318,10320,10322,10324,10326,10328,10330,10332,10334,10336,10338,10340,10342,10344,10346,10348,10350,10352,10354,10356,10358,10360,10362,10364,10366,10368,10370,10372,10374,10376,10378,10380,10382,10384,10386,10388,10390,10392,10394,10396,10398,10400,10402,10404,10406,10408,10410,10412,10414,10416,10418,10420,10422,10424,10426,10428,10430,10432,10434,10436,10438,10440,10442,10444,10446,10448,10450,10452,10454,10456,10458,10460,10462,10464,10466,10468,10470,10472,10474,10476,10478,10480,10482,10484,10486,10488,10490,10492,10494,10496,10498,10500,10502,10504,10506,10508,10510,10512,10514,10516,10518,10520,10522,10524,10526,10528,10530,10532,10534,10536,10538,10540,10542,10544,10546,10548,10550,10552,10554,10556,10558,10560,10562,10564,10566,10568,10570,10572,10574,10576,10578,10580,10582,10584,10586,10588,10590,10592,10594,10596,10598,10600,10602,10604,10606,10608,10610,10612,10614,10616,10618,10620,10622,10624,10626,10628,10630,10632,10634,10636,10638,10640,10642,10644,10646,10648,10650,10652,10654,10656,10658,10660,10662,10664,10666,10668,10670,10672,10674,10676,10678,10680,10682,10684,10686,10688,10690,10692,10694,10696,10698,10700,10702,10704,10706,10708,10710,10712,10714,10716,10718,10720,10722,10724,10726,10728,10730,10732,10734,10736,10738,10740,10742,10744,10746,10748,10750,10752,10754,10756,10758,10760,10762,10764,10766,10768,10770,10772,10774,10776,10778,10780,10782,10784,10786,10788,10790,10792,10794,10796,10798,10800,10802,10804,10806,10808,10810,10812,10814,10816,10818,10820,10822,10824,10826,10828,10830,10832,10834,10836,10838,10840,10842,10844,10846,10848,10850,10852,10854,10856,10858,10860,10862,10864,10866,10868,10870,10872,10874,10876,10878,10880,10882,10884,10886,10888,10890,10892,10894,10896,10898,10900,10902,10904,10906,10908,10910,10912,10914,10916,10918,10920,10922,10924,10926,10928,10930,10932,10934,10936,10938,10940,10942,10944,10946,10948,10950,10952,10954,10956,10958,10960,10962,10964,10966,10968,10970,10972,10974,10976,10978,10980,10982,10984,10986,10988,10990,10992,10994,10996,10998,11000,11002,11004,11006,11008,11010,11012,11014,11016,11018,11020,11022,11024,11026,11028,11030,11032,11034,11036,11038,11040,11042,11044,11046,11048,11050,11052,11054,11056,11058,11060,11062,11064,11066,11068,11070,11072,11074,11076,11078,11080,11082,11084,11086,11088,11090,11092,11094,11096,11098,11100,11102,11104,11106,11108,11110,11112,11114,11116,11118,11120,11122,11124,11126,11128,11130,11132,11134,11136,11138,11140,11142,11144,11146,11148,11150,11152,11154,11156,11158,11160,11162,11164,11166,11168,11170,11172,11174,11176,11178,11180,11182,11184,11186,11188,11190,11192,11194,11196,11198,11200,11202,11204,11206,11208,11210,11212,11214,11216,11218,11220,11222,11224,11226,11228,11230,11232,11234,11236,11238,11240,11242,11244,11246,11248,11250,11252,11254,11256,11258,11260,11262,11264,11266,11268,11270,11272,11274,11276,11278,11280,11282,11284,11286,11288,11290,11292,11294,11296,11298,11300,11302,11304,11306,11308,11310,11312,11314,11316,11318,11320,11322,11324,11326,11328,1133
0,11332,11334,11336,11338,11340,11342,11344,11346,11348,11350,11352,11354,11356,11358,11360,11362,11364,11366,11368,11370,11372,11374,11376,11378,11380,11382,11384,11386,11388,11390,11392,11394,11396,11398,11400,11402,11404,11406,11408,11410,11412,11414,11416,11418,11420,11422,11424,11426,11428,11430,11432,11434,11436,11438,11440,11442,11444,11446,11448,11450,11452,11454,11456,11458,11460,11462,11464,11466,11468,11470,11472,11474,11476,11478,11480,11482,11484,11486,11488,11490,11492,11494,11496,11498,11500,11502,11504,11506,11508,11510,11512,11514,11516,11518,11520,11522,11524,11526,11528,11530,11532,11534,11536,11538,11540,11542,11544,11546,11548,11550,11552,11554,11556,11558,11560,11562,11564,11566,11568,11570,11572,11574,11576,11578,11580,11582,11584,11586,11588,11590,11592,11594,11596,11598,11600,11602,11604,11606,11608,11610,11612,11614,11616,11618,11620,11622,11624,11626,11628,11630,11632,11634,11636,11638,11640,11642,11644,11646,11648,11650,11652,11654,11656,11658,11660,11662,11664,11666,11668,11670,11672,11674,11676,11678,11680,11682,11684,11686,11688,11690,11692,11694,11696,11698,11700,11702,11704,11706,11708,11710,11712,11714,11716,11718,11720,11722,11724,11726,11728,11730,11732,11734,11736,11738,11740,11742,11744,11746,11748,11750,11752,11754,11756,11758,11760,11762,11764,11766,11768,11770,11772,11774,11776,11778,11780,11782,11784,11786,11788,11790,11792,11794,11796,11798,11800,11802,11804,11806,11808,11810,11812,11814,11816,11818,11820,11822,11824,11826,11828,11830,11832,11834,11836,11838,11840,11842,11844,11846,11848,11850,11852,11854,11856,11858,11860,11862,11864,11866,11868,11870,11872,11874,11876,11878,11880,11882,11884,11886,11888,11890,11892,11894,11896,11898,11900,11902,11904,11906,11908,11910,11912,11914,11916,11918,11920,11922,11924,11926,11928,11930,11932,11934,11936,11938,11940,11942,11944,11946,11948,11950,11952,11954,11956,11958,11960,11962,11964,11966,11968,11970,11972,11974,11976,11978,11980,11982,11984,11986,11988,11990,11992,11994,11996,11998,12000,12002,12004,12006,12008,12010,12012,12014,12016,12018,12020,12022,12024,12026,12028,12030,12032,12034,12036,12038,12040,12042,12044,12046,12048,12050,12052,12054,12056,12058,12060,12062,12064,12066,12068,12070,12072,12074,12076,12078,12080,12082,12084,12086,12088,12090,12092,12094,12096,12098,12100,12102,12104,12106,12108,12110,12112,12114,12116,12118,12120,12122,12124,12126,12128,12130,12132,12134,12136,12138,12140,12142,12144,12146,12148,12150,12152,12154,12156,12158,12160,12162,12164,12166,12168,12170,12172,12174,12176,12178,12180,12182,12184,12186,12188,12190,12192,12194,12196,12198,12200,12202,12204,12206,12208,12210,12212,12214,12216,12218,12220,12222,12224,12226,12228,12230,12232,12234,12236,12238,12240,12242,12244,12246,12248,12250,12252,12254,12256,12258,12260,12262,12264,12266,12268,12270,12272,12274,12276,12278,12280,12282,12284,12286,12288,12290,12292,12294,12296,12298,12300,12302,12304,12306,12308,12310,12312,12314,12316,12318,12320,12322,12324,12326,12328,12330,12332,12334,12336,12338,12340,12342,12344,12346,12348,12350,12352,12354,12356,12358,12360,12362,12364,12366,12368,12370,12372,12374,12376,12378,12380,12382,12384,12386,12388,12390,12392,12394,12396,12398,12400,12402,12404,12406,12408,12410,12412,12414,12416,12418,12420,12422,12424,12426,12428,12430,12432,12434,12436,12438,12440,12442,12444,12446,12448,12450,12452,12454,12456,12458,12460,12462,12464,12466,12468,12470,12472,12474,12476,12478,12480,12482,12484,12486,12488,12490,12492,12494,12496,12498,12500,12502,12504,12506,12508,12510,12512,12514,1
2516,12518,12520,12522,12524,12526,12528,12530,12532,12534,12536,12538,12540,12542,12544,12546,12548,12550,12552,12554,12556,12558,12560,12562,12564,12566,12568,12570,12572,12574,12576,12578,12580,12582,12584,12586,12588,12590,12592,12594,12596,12598,12600,12602,12604,12606,12608,12610,12612,12614,12616,12618,12620,12622,12624,12626,12628,12630,12632,12634,12636,12638,12640,12642,12644,12646,12648,12650,12652,12654,12656,12658,12660,12662,12664,12666,12668,12670,12672,12674,12676,12678,12680,12682,12684,12686,12688,12690,12692,12694,12696,12698,12700,12702,12704,12706,12708,12710,12712,12714,12716,12718,12720,12722,12724,12726,12728,12730,12732,12734,12736,12738,12740,12742,12744,12746,12748,12750,12752,12754,12756,12758,12760,12762,12764,12766,12768,12770,12772,12774,12776,12778,12780,12782,12784,12786,12788,12790,12792,12794,12796,12798,12800,12802,12804,12806,12808,12810,12812,12814,12816,12818,12820,12822,12824,12826,12828,12830,12832,12834,12836,12838,12840,12842,12844,12846,12848,12850,12852,12854,12856,12858,12860,12862,12864,12866,12868,12870,12872,12874,12876,12878,12880,12882,12884,12886,12888,12890,12892,12894,12896,12898,12900,12902,12904,12906,12908,12910,12912,12914,12916,12918,12920,12922,12924,12926,12928,12930,12932,12934,12936,12938,12940,12942,12944,12946,12948,12950,12952,12954,12956,12958,12960,12962,12964,12966,12968,12970,12972,12974,12976,12978,12980,12982,12984,12986,12988,12990,12992,12994,12996,12998,13000,13002,13004,13006,13008,13010,13012,13014,13016,13018,13020,13022,13024,13026,13028,13030,13032,13034,13036,13038,13040,13042,13044,13046,13048,13050,13052,13054,13056,13058,13060,13062,13064,13066,13068,13070,13072,13074,13076,13078,13080,13082,13084,13086,13088,13090,13092,13094,13096,13098,13100,13102,13104,13106,13108,13110,13112,13114,13116,13118,13120,13122,13124,13126,13128,13130,13132,13134,13136,13138,13140,13142,13144,13146,13148,13150,13152,13154,13156,13158,13160,13162,13164,13166,13168,13170,13172,13174,13176,13178,13180,13182,13184,13186,13188,13190,13192,13194,13196,13198,13200,13202,13204,13206,13208,13210,13212,13214,13216,13218,13220,13222,13224,13226,13228,13230,13232,13234,13236,13238,13240,13242,13244,13246,13248,13250,13252,13254,13256,13258,13260,13262,13264,13266,13268,13270,13272,13274,13276,13278,13280,13282,13284,13286,13288,13290,13292,13294,13296,13298,13300,13302,13304,13306,13308,13310,13312,13314,13316,13318,13320,13322,13324,13326,13328,13330,13332,13334,13336,13338,13340,13342,13344,13346,13348,13350,13352,13354,13356,13358,13360,13362,13364,13366,13368,13370,13372,13374,13376,13378,13380,13382,13384,13386,13388,13390,13392,13394,13396,13398,13400,13402,13404,13406,13408,13410,13412,13414,13416,13418,13420,13422,13424,13426,13428,13430,13432,13434,13436,13438,13440,13442,13444,13446,13448,13450,13452,13454,13456,13458,13460,13462,13464,13466,13468,13470,13472,13474,13476,13478,13480,13482,13484,13486,13488,13490,13492,13494,13496,13498,13500,13502,13504,13506,13508,13510,13512,13514,13516,13518,13520,13522,13524,13526,13528,13530,13532,13534,13536,13538,13540,13542,13544,13546,13548,13550,13552,13554,13556,13558,13560,13562,13564,13566,13568,13570,13572,13574,13576,13578,13580,13582,13584,13586,13588,13590,13592,13594,13596,13598,13600,13602,13604,13606,13608,13610,13612,13614,13616,13618,13620,13622,13624,13626,13628,13630,13632,13634,13636,13638,13640,13642,13644,13646,13648,13650,13652,13654,13656,13658,13660,13662,13664,13666,13668,13670,13672,13674,13676,13678,13680,13682,13684,13686,13688,13690,13692,13694,13696,13698,1370
0,13702,13704,13706,13708,13710,13712,13714,13716,13718,13720,13722,13724,13726,13728,13730,13732,13734,13736,13738,13740,13742,13744,13746,13748,13750,13752,13754,13756,13758,13760,13762,13764,13766,13768,13770,13772,13774,13776,13778,13780,13782,13784,13786,13788,13790,13792,13794,13796,13798,13800,13802,13804,13806,13808,13810,13812,13814,13816,13818,13820,13822,13824,13826,13828,13830,13832,13834,13836,13838,13840,13842,13844,13846,13848,13850,13852,13854,13856,13858,13860,13862,13864,13866,13868,13870,13872,13874,13876,13878,13880,13882,13884,13886,13888,13890,13892,13894,13896,13898,13900,13902,13904,13906,13908,13910,13912,13914,13916,13918,13920,13922,13924,13926,13928,13930,13932,13934,13936,13938,13940,13942,13944,13946,13948,13950,13952,13954,13956,13958,13960,13962,13964,13966,13968,13970,13972,13974,13976,13978,13980,13982,13984,13986,13988,13990,13992,13994,13996,13998,14000,14002,14004,14006,14008,14010,14012,14014,14016,14018,14020,14022,14024,14026,14028,14030,14032,14034,14036,14038,14040,14042,14044,14046,14048,14050,14052,14054,14056,14058,14060,14062,14064,14066,14068,14070,14072,14074,14076,14078,14080,14082,14084,14086,14088,14090,14092,14094,14096,14098,14100,14102,14104,14106,14108,14110,14112,14114,14116,14118,14120,14122,14124,14126,14128,14130,14132,14134,14136,14138,14140,14142,14144,14146,14148,14150,14152,14154,14156,14158,14160,14162,14164,14166,14168,14170,14172,14174,14176,14178,14180,14182,14184,14186,14188,14190,14192,14194,14196,14198,14200,14202,14204,14206,14208,14210,14212,14214,14216,14218,14220,14222,14224,14226,14228,14230,14232,14234,14236,14238,14240,14242,14244,14246,14248,14250,14252,14254,14256,14258,14260,14262,14264,14266,14268,14270,14272,14274,14276,14278,14280,14282,14284,14286,14288,14290,14292,14294,14296,14298,14300,14302,14304,14306,14308,14310,14312,14314,14316,14318,14320,14322,14324,14326,14328,14330,14332,14334,14336,14338,14340,14342,14344,14346,14348,14350,14352,14354,14356,14358,14360,14362,14364,14366,14368,14370,14372,14374,14376,14378,14380,14382,14384,14386,14388,14390,14392,14394,14396,14398,14400,14402,14404,14406,14408,14410,14412,14414,14416,14418,14420,14422,14424,14426,14428,14430,14432,14434,14436,14438,14440,14442,14444,14446,14448,14450,14452,14454,14456,14458,14460,14462,14464,14466,14468,14470,14472,14474,14476,14478,14480,14482,14484,14486,14488,14490,14492,14494,14496,14498,14500,14502,14504,14506,14508,14510,14512,14514,14516,14518,14520,14522,14524,14526,14528,14530,14532,14534,14536,14538,14540,14542,14544,14546,14548,14550,14552,14554,14556,14558,14560,14562,14564,14566,14568,14570,14572,14574,14576,14578,14580,14582,14584,14586,14588,14590,14592,14594,14596,14598,14600,14602,14604,14606,14608,14610,14612,14614,14616,14618,14620,14622,14624,14626,14628,14630,14632,14634,14636,14638,14640,14642,14644,14646,14648,14650,14652,14654,14656,14658,14660,14662,14664,14666,14668,14670,14672,14674,14676,14678,14680,14682,14684,14686,14688,14690,14692,14694,14696,14698,14700,14702,14704,14706,14708,14710,14712,14714,14716,14718,14720,14722,14724,14726,14728,14730,14732,14734,14736,14738,14740,14742,14744,14746,14748,14750,14752,14754,14756,14758,14760,14762,14764,14766,14768,14770,14772,14774,14776,14778,14780,14782,14784,14786,14788,14790,14792,14794,14796,14798,14800,14802,14804,14806,14808,14810,14812,14814,14816,14818,14820,14822,14824,14826,14828,14830,14832,14834,14836,14838,14840,14842,14844,14846,14848,14850,14852,14854,14856,14858,14860,14862,14864,14866,14868,14870,14872,14874,14876,14878,14880,14882,14884,1
4886,14888,14890,14892,14894,14896,14898,14900,14902,14904,14906,14908,14910,14912,14914,14916,14918,14920,14922,14924,14926,14928,14930,14932,14934,14936,14938,14940,14942,14944,14946,14948,14950,14952,14954,14956,14958,14960,14962,14964,14966,14968,14970,14972,14974,14976,14978,14980,14982,14984,14986,14988,14990,14992,14994,14996,14998,15000,15002,15004,15006,15008,15010,15012,15014,15016,15018,15020,15022,15024,15026,15028,15030,15032,15034,15036,15038,15040,15042,15044,15046,15048,15050,15052,15054,15056,15058,15060,15062,15064,15066,15068,15070,15072,15074,15076,15078,15080,15082,15084,15086,15088,15090,15092,15094,15096,15098,15100,15102,15104,15106,15108,15110,15112,15114,15116,15118,15120,15122,15124,15126,15128,15130,15132,15134,15136,15138,15140,15142,15144,15146,15148,15150,15152,15154,15156,15158,15160,15162,15164,15166,15168,15170,15172,15174,15176,15178,15180,15182,15184,15186,15188,15190,15192,15194,15196,15198,15200,15202,15204,15206,15208,15210,15212,15214,15216,15218,15220,15222,15224,15226,15228,15230,15232,15234,15236,15238,15240,15242,15244,15246,15248,15250,15252,15254,15256,15258,15260,15262,15264,15266,15268,15270,15272,15274,15276,15278,15280,15282,15284,15286,15288,15290,15292,15294,15296,15298,15300,15302,15304,15306,15308,15310,15312,15314,15316,15318,15320,15322,15324,15326,15328,15330,15332,15334,15336,15338,15340,15342,15344,15346,15348,15350,15352,15354,15356,15358,15360,15362,15364,15366,15368,15370,15372,15374,15376,15378,15380,15382,15384,15386,15388,15390,15392,15394,15396,15398,15400,15402,15404,15406,15408,15410,15412,15414,15416,15418,15420,15422,15424,15426,15428,15430,15432,15434,15436,15438,15440,15442,15444,15446,15448,15450,15452,15454,15456,15458,15460,15462,15464,15466,15468,15470,15472,15474,15476,15478,15480,15482,15484,15486,15488,15490,15492,15494,15496,15498,15500,15502,15504,15506,15508,15510,15512,15514,15516,15518,15520,15522,15524,15526,15528,15530,15532,15534,15536,15538,15540,15542,15544,15546,15548,15550,15552,15554,15556,15558,15560,15562,15564,15566,15568,15570,15572,15574,15576,15578,15580,15582,15584,15586,15588,15590,15592,15594,15596,15598,15600,15602,15604,15606,15608,15610,15612,15614,15616,15618,15620,15622,15624,15626,15628,15630,15632,15634,15636,15638,15640,15642,15644,15646,15648,15650,15652,15654,15656,15658,15660,15662,15664,15666,15668,15670,15672,15674,15676,15678,15680,15682,15684,15686,15688,15690,15692,15694,15696,15698,15700,15702,15704,15706,15708,15710,15712,15714,15716,15718,15720,15722,15724,15726,15728,15730,15732,15734,15736,15738,15740,15742,15744,15746,15748,15750,15752,15754,15756,15758,15760,15762,15764,15766,15768,15770,15772,15774,15776,15778,15780,15782,15784,15786,15788,15790,15792,15794,15796,15798,15800,15802,15804,15806,15808,15810,15812,15814,15816,15818,15820,15822,15824,15826,15828,15830,15832,15834,15836,15838,15840,15842,15844,15846,15848,15850,15852,15854,15856,15858,15860,15862,15864,15866,15868,15870,15872,15874,15876,15878,15880,15882,15884,15886,15888,15890,15892,15894,15896,15898,15900,15902,15904,15906,15908,15910,15912,15914,15916,15918,15920,15922,15924,15926,15928,15930,15932,15934,15936,15938,15940,15942,15944,15946,15948,15950,15952,15954,15956,15958,15960,15962,15964,15966,15968,15970,15972,15974,15976,15978,15980,15982,15984,15986,15988,15990,15992,15994,15996,15998,16000,16002,16004,16006,16008,16010,16012,16014,16016,16018,16020,1,16024,16026,16028,16030,16032,16034,16036,16038,16040,16042,16044,16046,16048,16050,16052,16054,16056,16058,16060,16062,16064,16066,16068,16070,16
072,16074,16076,16078,16080,16082,16084,16086,16088,16090,16092,16094,16096,16098,16100,16102,16104,16106,16108,16110,16112,16114,16116,16118,16120,16122,16124,16126,16128,16130,16132,16134,16136,16138,16140,16142,16144,16146,16148,16150,16152,16154,16156,16158,16160,16162,16164,16166,16168,16170,16172,16174,16176,16178,16180,16182,16184,16186,16188,16190,16192,16194,16196,16198,16200,16202,16204,16206,16208,16210,16212,16214,16216,16218,16220,16222,16224,16226,16228,16230,16232,16234,16236,16238,16240,16242,16244,16246,16248,16250,16252,16254,16256,16258,16260,16262,16264,16266,16268,16270,16272,16274,16276,16278,16280,16282,16284,16286,16288,16290,16292,16294,16296,16298,16300,16302,16304,16306,16308,16310,16312,16314,16316,16318,16320,16322,16324,16326,16328,16330,16332,16334,16336,16338,16340,16342,16344,16346,16348,16350,16352,16354,16356,16358,16360,16362,16364,16366,16368,16370,16372,16374,16376,16378,16380,16382,16384,16386,16388,16390,16392,16394,16396,16398,16400,16402,16404,16406,16408,16410,16412,16414,16416,16418,16420,16422,16424,16426,16428,16430,16432,16434,16436,16438,16440,16442,16444,16446,16448,16450,16452,16454,16456,16458,16460,16462,16464,16466,16468,16470,16472,16474,16476,16478,16480,16482,16484,16486,16488,16490,16492,16494,16496,16498,16500,16502,16504,16506,16508,16510,16512,16514,16516,16518,16520,16522,16524,16526,16528,16530,16532,16534,16536,16538,16540,16542,16544,16546,16548,16550,16552,16554,16556,16558,16560,16562,16564,16566,16568,16570,16572,16574,16576,16578,16580,16582,16584,16586,16588,16590,16592,16594,16596,16598,16600,16602,16604,16606,16608,16610,16612,16614,16616,16618,16620,16622,16624,16626,16628,16630,16632,16634,16636,16638,16640,16642,16644,16646,16648,16650,16652,16654,16656,16658,16660,16662,16664,16666,16668,16670,16672,16674,16676,16678,16680,16682,16684,16686,16688,16690,16692,16694,16696,16698,16700,16702,16704,16706,16708,16710,16712,16714,16716,16718,16720,16722,16724,16726,16728,16730,16732,16734,16736,16738,16740,16742,16744,16746,16748,16750,16752,16754,16756,16758,16760,16762,16764,16766,16768,16770,16772,16774,16776,16778,16780,16782,16784,16786,16788,16790,16792,16794,16796,16798,16800,16802,16804,16806,16808,16810,16812,16814,16816,16818,16820,16822,16824,16826,16828,16830,16832,16834,16836,16838,16840,16842,16844,16846,16848,16850,16852,16854,16856,16858,16860,16862,16864,16866,16868,16870,16872,16874,16876,16878,16880,16882,16884,16886,16888,16890,16892,16894,16896,16898,16900,16902,16904,16906,16908,16910,16912,16914,16916,16918,16920,16922,16924,16926,16928,16930,16932,16934,16936,16938,16940,16942,16944,16946,16948,16950,16952,16954,16956,16958,16960,16962,16964,16966,16968,16970,16972,16974,16976,16978,16980,16982,16984,16986,16988,16990,16992,16994,16996,16998,17000,17002,17004,17006,17008,17010,17012,17014,17016,17018,17020,17022,17024,17026,17028,17030,17032,17034,17036,17038,17040,17042,17044,17046,17048,17050,17052,17054,17056,17058,17060,17062,17064,17066,17068,17070,17072,17074,17076,17078,17080,17082,17084,17086,17088,17090,17092,17094,17096,17098,17100,17102,17104,17106,17108,17110,17112,17114,17116,17118,17120,17122,17124,17126,17128,17130,17132,17134,17136,17138,17140,17142,17144,17146,17148,17150,17152,17154,17156,17158,17160,17162,17164,17166,17168,17170,17172,17174,17176,17178,17180,17182,17184,17186,17188,17190,17192,17194,17196,17198,17200,17202,17204,17206,17208,17210,17212,17214,17216,17218,17220,17222,17224,17226,17228,17230,17232,17234,17236,17238,17240,17242,17244,17246,17248,17250,17252,17254,17256
,17258,17260,17262,17264,17266,17268,17270,17272,17274,17276,17278,17280,17282,17284,17286,17288,17290,17292,17294,17296,17298,17300,17302,17304,17306,17308,17310,17312,17314,17316,17318,17320,17322,17324,17326,17328,17330,17332,17334,17336,17338,17340,17342,17344,17346,17348,17350,17352,17354,17356,17358,17360,17362,17364,17366,17368,17370,17372,17374,17376,17378,17380,17382,17384,17386,17388,17390,17392,17394,17396,17398,17400,17402,17404,17406,17408,17410,17412,17414,17416,17418,17420,17422,17424,17426,17428,17430,17432,17434,17436,17438,17440,17442,17444,17446,17448,17450,17452,17454,17456,17458,17460,17462,17464,17466,17468,17470,17472,17474,17476,17478,17480,17482,17484,17486,17488,17490,17492,17494,17496,17498,17500,17502,17504,17506,17508,17510,17512,17514,17516,17518,17520,17522,17524,17526,17528,17530,17532,17534,17536,17538,17540,17542,17544,17546,17548,17550,17552,17554,17556,17558,17560,17562,17564,17566,17568,17570,17572,17574,17576,17578,17580,17582,17584,17586,17588,17590,17592,17594,17596,17598,17600,17602,17604,17606,17608,17610,17612,17614,17616,17618,17620,17622,17624,17626,17628,17630,17632,17634,17636,17638,17640,17642,17644,17646,17648,17650,17652,17654,17656,17658,17660,17662,17664,17666,17668,17670,17672,17674,17676,17678,17680,17682,17684,17686,17688,17690,17692,17694,17696,17698,17700,17702,17704,17706,17708,17710,17712,17714,17716,17718,17720,17722,17724,17726,17728,17730,17732,17734,17736,17738,17740,17742,17744,17746,17748,17750,17752,17754,17756,17758,17760,17762,17764,17766,17768,17770,17772,17774,17776,17778,17780,17782,17784,17786,17788,17790,17792,17794,17796,17798,17800,17802,17804,17806,17808,17810,17812,17814,17816,17818,17820,17822,17824,17826,17828,17830,17832,17834,17836,17838,17840,17842,17844,17846,17848,17850,17852,17854,17856,17858,17860,17862,17864,17866,17868,17870,17872,17874,17876,17878,17880,17882,17884,17886,17888,17890,17892,17894,17896,17898,17900,17902,17904,17906,17908,17910,17912,17914,17916,17918,17920,17922,17924,17926,17928,17930,17932,17934,17936,17938,17940,17942,17944,17946,17948,17950,17952,17954,17956,17958,17960,17962,17964,17966,17968,17970,17972,17974,17976,17978,17980,17982,17984,17986,17988,17990,17992,17994,17996,17998,18000,18002,18004,18006,18008,18010,18012,18014,18016,18018,18020,18022,18024,18026,18028,18030,18032,18034,18036,18038,18040,18042,18044,18046,18048,18050,18052,18054,18056,18058,18060,18062,18064,18066,18068,18070,18072,18074,18076,18078,18080,18082,18084,18086,18088,18090,18092,18094,18096,18098,18100,18102,18104,18106,18108,18110,18112,18114,18116,18118,18120,18122,18124,18126,18128,18130,18132,18134,18136,18138,18140,18142,18144,18146,18148,18150,18152,18154,18156,18158,18160,18162,18164,18166,18168,18170,18172,18174,18176,18178,18180,18182,18184,18186,18188,18190,18192,18194,18196,18198,18200,18202,18204,18206,18208,18210,18212,18214,18216,18218,18220,18222,18224,18226,18228,18230,18232,18234,18236,18238,18240,18242,18244,18246,18248,18250,18252,18254,18256,18258,18260,18262,18264,18266,18268,18270,18272,18274,18276,18278,18280,18282,18284,18286,18288,18290,18292,18294,18296,18298,18300,18302,18304,18306,18308,18310,18312,18314,18316,18318,18320,18322,18324,18326,18328,18330,18332,18334,18336,18338,18340,18342,18344,18346,18348,18350,18352,18354,18356,18358,18360,18362,18364,18366,18368,18370,18372,18374,18376,18378,18380,18382,18384,18386,18388,18390,18392,18394,18396,18398,18400,18402,18404,18406,18408,18410,18412,18414,18416,18418,18420,18422,18424,18426,18428,18430,18432,18434,18436,18438,18440,18
442,18444,18446,18448,18450,18452,18454,18456,18458,18460,18462,18464,18466,18468,18470,18472,18474,18476,18478,18480,18482,18484,18486,18488,18490,18492,18494,18496,18498,18500,18502,18504,18506,18508,18510,18512,18514,18516,18518,18520,18522,18524,18526,18528,18530,18532,18534,18536,18538,18540,18542,18544,18546,18548,18550,18552,18554,18556,18558,18560,18562,18564,18566,18568,18570,18572,18574,18576,18578,18580,18582,18584,18586,18588,18590,18592,18594,18596,18598,18600,18602,18604,18606,18608,18610,18612,18614,18616,18618,18620,18622,18624,18626,18628,18630,18632,18634,18636,18638,18640,18642,18644,18646,18648,18650,18652,18654,18656,18658,18660,18662,18664,18666,18668,18670,18672,18674,18676,18678,18680,18682,18684,18686,18688,18690,18692,18694,18696,18698,18700,18702,18704,18706,18708,18710,18712,18714,18716,18718,18720,18722,18724,18726,18728,18730,18732,18734,18736,18738,18740,18742,18744,18746,18748,18750,18752,18754,18756,18758,18760,18762,18764,18766,18768,18770,18772,18774,18776,18778,18780,18782,18784,18786,18788,18790,18792,18794,18796,18798,18800,18802,18804,18806,18808,18810,18812,18814,18816,18818,18820,18822,18824,18826,18828,18830,18832,18834,18836,18838,18840,18842,18844,18846,18848,18850,18852,18854,18856,18858,18860,18862,18864,18866,18868,18870,18872,18874,18876,18878,18880,18882,18884,18886,18888,18890,18892,18894,18896,18898,18900,18902,18904,18906,18908,18910,18912,18914,18916,18918,18920,18922,18924,18926,18928,18930,18932,18934,18936,18938,18940,18942,18944,18946,18948,18950,18952,18954,18956,18958,18960,18962,18964,18966,18968,18970,18972,18974,18976,18978,18980,18982,18984,18986,18988,18990,18992,18994,18996,18998,19000,19002,19004,19006,19008,19010,19012,19014,19016,19018,19020,19022,19024,19026,19028,19030,19032,19034,19036,19038,19040,19042,19044,19046,19048,19050,19052,19054,19056,19058,19060,19062,19064,19066,19068,19070,19072,19074,19076,19078,19080,19082,19084,19086,19088,19090,19092,19094,19096,19098,19100,19102,19104,19106,19108,19110,19112,19114,19116,19118,19120,19122,19124,19126,19128,19130,19132,19134,19136,19138,19140,19142,19144,19146,19148,19150,19152,19154,19156,19158,19160,19162,19164,19166,19168,19170,19172,19174,19176,19178,19180,19182,19184,19186,19188,19190,19192,19194,19196,19198,19200,19202,19204,19206,19208,19210,19212,19214,19216,19218,19220,19222,19224,19226,19228,19230,19232,19234,19236,19238,19240,19242,19244,19246,19248,19250,19252,19254,19256,19258,19260,19262,19264,19266,19268,19270,19272,19274,19276,19278,19280,19282,19284,19286,19288,19290,19292,19294,19296,19298,19300,19302,19304,19306,19308,19310,19312,19314,19316,19318,19320,19322,19324,19326,19328,19330,19332,19334,19336,19338,19340,19342,19344,19346,19348,19350,19352,19354,19356,19358,19360,19362,19364,19366,19368,19370,19372,19374,19376,19378,19380,19382,19384,19386,19388,19390,19392,19394,19396,19398,19400,19402,19404,19406,19408,19410,19412,19414,19416,19418,19420,19422,19424,19426,19428,19430,19432,19434,19436,19438,19440,19442,19444,19446,19448,19450,19452,19454,19456,19458,19460,19462,19464,19466,19468,19470,19472,19474,19476,19478,19480,19482,19484,19486,19488,19490,19492,19494,19496,19498,19500,19502,19504,19506,19508,19510,19512,19514,19516,19518,19520,19522,19524,19526,19528,19530,19532,19534,19536,19538,19540,19542,19544,19546,19548,19550,19552,19554,19556,19558,19560,19562,19564,19566,19568,19570,19572,19574,19576,19578,19580,19582,19584,19586,19588,19590,19592,19594,19596,19598,19600,19602,19604,19606,19608,19610,19612,19614,19616,19618,19620,19622,19624,19626
,19628,19630,19632,19634,19636,19638,19640,19642,19644,19646,19648,19650,19652,19654,19656,19658,19660,19662,19664,19666,19668,19670,19672,19674,19676,19678,19680,19682,19684,19686,19688,19690,19692,19694,19696,19698,19700,19702,19704,19706,19708,19710,19712,19714,19716,19718,19720,19722,19724,19726,19728,19730,19732,19734,19736,19738,19740,19742,19744,19746,19748,19750,19752,19754,19756,19758,19760,19762,19764,19766,19768,19770,19772,19774,19776,19778,19780,19782,19784,19786,19788,19790,19792,19794,19796,19798,19800,19802,19804,19806,19808,19810,19812,19814,19816,19818,19820,19822,19824,19826,19828,19830,19832,19834,19836,19838,19840,19842,19844,19846,19848,19850,19852,19854,19856,19858,19860,19862,19864,19866,19868,19870,19872,19874,19876,19878,19880,19882,19884,19886,19888,19890,19892,19894,19896,19898,19900,19902,19904,19906,19908,19910,19912,19914,19916,19918,19920,19922,19924,19926,19928,19930,19932,19934,19936,19938,19940,19942,19944,19946,19948,19950,19952,19954,19956,19958,19960,19962,19964,19966,19968,19970,19972,19974,19976,19978,19980,19982,19984,19986,19988,19990,19992,19994,19996,19998,20000,20002,20004,20006,20008,20010,20012,20014,20016,20018,20020,20022,20024,20026,20028,20030,20032,20034,20036,20038,20040,20042,20044,20046,20048,20050,20052,20054,20056,20058,20060,20062,20064,20066,20068,20070,20072,20074,20076,20078,20080,20082,20084,20086,20088,20090,20092,20094,20096,20098,20100,20102,20104,20106,20108,20110,20112,20114,20116,20118,20120,20122,20124,20126,20128,20130,20132,20134,20136,20138,20140,20142,20144,20146,20148,20150,20152,20154,20156,20158,20160,20162,20164,20166,20168,20170,20172,20174,20176,20178,20180,20182,20184,20186,20188,20190,20192,20194,20196,20198,20200,20202,20204,20206,20208,20210,20212,20214,20216,20218,20220,20222,20224,20226,20228,20230,20232,20234,20236,20238,20240,20242,20244,20246,20248,20250,20252,20254,20256,20258,20260,20262,20264,20266,20268,20270,20272,20274,20276,20278,20280,20282,20284,20286,20288,20290,20292,20294,20296,20298,20300,20302,20304,20306,20308,20310,20312,20314,20316,20318,20320,20322,20324,20326,20328,20330,20332,20334,20336,20338,20340,20342,20344,20346,20348,20350,20352,20354,20356,20358,20360,20362,20364,20366,20368,20370,20372,20374,20376,20378,20380,20382,20384,20386,20388,20390,20392,20394,20396,20398,20400,20402,20404,20406,20408,20410,20412,20414,20416,20418,20420,20422,20424,20426,20428,20430,20432,20434,20436,20438,20440,20442,20444,20446,20448,20450,20452,20454,20456,20458,20460,20462,20464,20466,20468,20470,20472,20474,20476,20478,20480,20482,20484,20486,20488,20490,20492,20494,20496,20498,20500,20502,20504,20506,20508,20510,20512,20514,20516,20518,20520,20522,20524,20526,20528,20530,20532,20534,20536,20538,20540,20542,20544,20546,20548,20550,20552,20554,20556,20558,20560,20562,20564,20566,20568,20570,20572,20574,20576,20578,20580,20582,20584,20586,20588,20590,20592,20594,20596,20598,20600,20602,20604,20606,20608,20610,20612,20614,20616,20618,20620,20622,20624,20626,20628,20630,20632,20634,20636,20638,20640,20642,20644,20646,20648,20650,20652,20654,20656,20658,20660,20662,20664,20666,20668,20670,20672,20674,20676,20678,20680,20682,20684,20686,20688,20690,20692,20694,20696,20698,20700,20702,20704,20706,20708,20710,20712,20714,20716,20718,20720,20722,20724,20726,20728,20730,20732,20734,20736,20738,20740,20742,20744,20746,20748,20750,20752,20754,20756,20758,20760,20762,20764,20766,20768,20770,20772,20774,20776,20778,20780,20782,20784,20786,20788,20790,20792,20794,20796,20798,20800,20802,20804,20806,20808,20810,20
812,20814,20816,20818,20820,20822,20824,20826,20828,20830,20832,20834,20836,20838,20840,20842,20844,20846,20848,20850,20852,20854,20856,20858,20860,20862,20864,20866,20868,20870,20872,20874,20876,20878,20880,20882,20884,20886,20888,20890,20892,20894,20896,20898,20900,20902,20904,20906,20908,20910,20912,20914,20916,20918,20920,20922,20924,20926,20928,20930,20932,20934,20936,20938,20940,20942,20944,20946,20948,20950,20952,20954,20956,20958,20960,20962,20964,20966,20968,20970,20972,20974,20976,20978,20980,20982,20984,20986,20988,20990,20992,20994,20996,20998,21000,21002,21004,21006,21008,21010,21012,21014,21016,21018,21020,21022,21024,21026,21028,21030,21032,21034,21036,21038,21040,21042,21044,21046,21048,21050,21052,21054,21056,21058,21060,21062,21064,21066,21068,21070,21072,21074,21076,21078,21080,21082,21084,21086,21088,21090,21092,21094,21096,21098,21100,21102,21104,21106,21108,21110,21112,21114,21116,21118,21120,21122,21124,21126,21128,21130,21132,21134,21136,21138,21140,21142,21144,21146,21148,21150,21152,21154,21156,21158,21160,21162,21164,21166,21168,21170,21172,21174,21176,21178,21180,21182,21184,21186,21188,21190,21192,21194,21196,21198,21200,21202,21204,21206,21208,21210,21212,21214,21216,21218,21220,21222,21224,21226,21228,21230,21232,21234,21236,21238,21240,21242,21244,21246,21248,21250,21252,21254,21256,21258,21260,21262,21264,21266,21268,21270,21272,21274,21276,21278,21280,21282,21284,21286,21288,21290,21292,21294,21296,21298,21300,21302,21304,21306,21308,21310,21312,21314,21316,21318,21320,21322,21324,21326,21328,21330,21332,21334,21336,21338,21340,21342,21344,21346,21348,21350,21352,21354,21356,21358,21360,21362,21364,21366,21368,21370,21372,21374,21376,21378,21380,21382,21384,21386,21388,21390,21392,21394,21396,21398,21400,21402,21404,21406,21408,21410,21412,21414,21416,21418,21420,21422,21424,21426,21428,21430,21432,21434,21436,21438,21440,21442,21444,21446,21448,21450,21452,21454,21456,21458,21460,21462,21464,21466,21468,21470,21472,21474,21476,21478,21480,21482,21484,21486,21488,21490,21492,21494,21496,21498,21500,21502,21504,21506,21508,21510,21512,21514,21516,21518,21520,21522,21524,21526,21528,21530,21532,21534,21536,21538,21540,21542,21544,21546,21548,21550,21552,21554,21556,21558,21560,21562,21564,21566,21568,21570,21572,21574,21576,21578,21580,21582,21584,21586,21588,21590,21592,21594,21596,21598,21600,21602,21604,21606,21608,21610,21612,21614,21616,21618,21620,21622,21624,21626,21628,21630,21632,21634,21636,21638,21640,21642,21644,21646,21648,21650,21652,21654,21656,21658,21660,21662,21664,21666,21668,21670,21672,21674,21676,21678,21680,21682,21684,21686,21688,21690,21692,21694,21696,21698,21700,21702,21704,21706,21708,21710,21712,21714,21716,21718,21720,21722,21724,21726,21728,21730,21732,21734,21736,21738,21740,21742,21744,21746,21748,21750,21752,21754,21756,21758,21760,21762,21764,21766,21768,21770,21772,21774,21776,21778,21780,21782,21784,21786,21788,21790,21792,21794,21796,21798,21800,21802,21804,21806,21808,21810,21812,21814,21816,21818,21820,21822,21824,21826,21828,21830,21832,21834,21836,21838,21840,21842,21844,21846,21848,21850,21852,21854,21856,21858,21860,21862,21864,21866,21868,21870,21872,21874,21876,21878,21880,21882,21884,21886,21888,21890,21892,21894,21896,21898,21900,21902,21904,21906,21908,21910,21912,21914,21916,21918,21920,21922,21924,21926,21928,21930,21932,21934,21936,21938,21940,21942,21944,21946,21948,21950,21952,21954,21956,21958,21960,21962,21964,21966,21968,21970,21972,21974,21976,21978,21980,21982,21984,21986,21988,21990,21992,21994,21996
,21998,22000,22002,22004,22006,22008,22010,22012,22014,22016,22018,22020,22022,22024,22026,22028,22030,22032,22034,22036,22038,22040,22042,22044,22046,22048,22050,22052,22054,22056,22058,22060,22062,22064,22066,22068,22070,22072,22074,22076,22078,22080,22082,22084,22086,22088,22090,22092,22094,22096,22098,22100,22102,22104,22106,22108,22110,22112,22114,22116,22118,22120,22122,22124,22126,22128,22130,22132,22134,22136,22138,22140,22142,22144,22146,22148,22150,22152,22154,22156,22158,22160,22162,22164,22166,22168,22170,22172,22174,22176,22178,22180,22182,22184,22186,22188,22190,22192,22194,22196,22198,22200,22202,22204,22206,22208,22210,22212,22214,22216,22218,22220,22222,22224,22226,22228,22230,22232,22234,22236,22238,22240,22242,22244,22246,22248,22250,22252,22254,22256,22258,22260,22262,22264,22266,22268,22270,22272,22274,22276,22278,22280,22282,22284,22286,22288,22290,22292,22294,22296,22298,22300,22302,22304,22306,22308,22310,22312,22314,22316,22318,22320,22322,22324,22326,22328,22330,22332,22334,22336,22338,22340,22342,22344,22346,22348,22350,22352,22354,22356,22358,22360,22362,22364,22366,22368,22370,22372,22374,22376,22378,22380,22382,22384,22386,22388,22390,22392,22394,22396,22398,22400,22402,22404,22406,22408,22410,22412,22414,22416,22418,22420,22422,22424,22426,22428,22430,22432,22434,22436,22438,22440,22442,22444,22446,22448,22450,22452,22454,22456,22458,22460,22462,22464,22466,22468,22470,22472,22474,22476,22478,22480,22482,22484,22486,22488,22490,22492,22494,22496,22498,22500,22502,22504,22506,22508,22510,22512,22514,22516,22518,22520,22522,22524,22526,22528,22530,22532,22534,22536,22538,22540,22542,22544,22546,22548,22550,22552,22554,22556,22558,22560,22562,22564,22566,22568,22570,22572,22574,22576,22578,22580,22582,22584,22586,22588,22590,22592,22594,22596,22598,22600,22602,22604,22606,22608,22610,22612,22614,22616,22618,22620,22622,22624,22626,22628,22630,22632,22634,22636,22638,22640,22642,22644,22646,22648,22650,22652,22654,22656,22658,22660,22662,22664,22666,22668,22670,22672,22674,22676,22678,22680,22682,22684,22686,22688,22690,22692,22694,22696,22698,22700,22702,22704,22706,22708,22710,22712,22714,22716,22718,22720,22722,22724,22726,22728,22730,22732,22734,22736,22738,22740,22742,22744,22746,22748,22750,22752,22754,22756,22758,22760,22762,22764,22766,22768,22770,22772,22774,22776,22778,22780,22782,22784,22786,22788,22790,22792,22794,22796,22798,22800,22802,22804,22806,22808,22810,22812,22814,22816,22818,22820,22822,22824,22826,22828,22830,22832,22834,22836,22838,22840,22842,22844,22846,22848,22850,22852,22854,22856,22858,22860,22862,22864,22866,22868,22870,22872,22874,22876,22878,22880,22882,22884,22886,22888,22890,22892,22894,22896,22898,22900,22902,22904,22906,22908,22910,22912,22914,22916,22918,22920,22922,22924,22926,22928,22930,22932,22934,22936,22938,22940,22942,22944,22946,22948,22950,22952,22954,22956,22958,22960,22962,22964,22966,22968,22970,22972,22974,22976,22978,22980,22982,22984,22986,22988,22990,22992,22994,22996,22998,23000,23002,23004,23006,23008,23010,23012,23014,23016,23018,23020,23022,23024,23026,23028,23030,23032,23034,23036,23038,23040,23042,23044,23046,23048,23050,23052,23054,23056,23058,23060,23062,23064,23066,23068,23070,23072,23074,23076,23078,23080,23082,23084,23086,23088,23090,23092,23094,23096,23098,23100,23102,23104,23106,23108,23110,23112,23114,23116,23118,23120,23122,23124,23126,23128,23130,23132,23134,23136,23138,23140,23142,23144,23146,23148,23150,23152,23154,23156,23158,23160,23162,23164,23166,23168,23170,23172,23174,23176,23178,23180,23
182,23184,23186,23188,23190,23192,23194,23196,23198,23200,23202,23204,23206,23208,23210,23212,23214,23216,23218,23220,23222,23224,23226,23228,23230,23232,23234,23236,23238,23240,23242,23244,23246,23248,23250,23252,23254,23256,23258,23260,23262,23264,23266,23268,23270,23272,23274,23276,23278,23280,23282,23284,23286,23288,23290,23292,23294,23296,23298,23300,23302,23304,23306,23308,23310,23312,23314,23316,23318,23320,23322,23324,23326,23328,23330,23332,23334,23336,23338,23340,23342,23344,23346,23348,23350,23352,23354,23356,23358,23360,23362,23364,23366,23368,23370,23372,23374,23376,23378,23380,23382,23384,23386,23388,23390,23392,23394,23396,23398,23400,23402,23404,23406,23408,23410,23412,23414,23416,23418,23420,23422,23424,23426,23428,23430,23432,23434,23436,23438,23440,23442,23444,23446,23448,23450,23452,23454,23456,23458,23460,23462,23464,23466,23468,23470,23472,23474,23476,23478,23480,23482,23484,23486,23488,23490,23492,23494,23496,23498,23500,23502,23504,23506,23508,23510,23512,23514,23516,23518,23520,23522,23524,23526,23528,23530,23532,23534,23536,23538,23540,23542,23544,23546,23548,23550,23552,23554,23556,23558,23560,23562,23564,23566,23568,23570,23572,23574,23576,23578,23580,23582,23584,23586,23588,23590,23592,23594,23596,23598,23600,23602,23604,23606,23608,23610,23612,23614,23616,23618,23620,23622,23624,23626,23628,23630,23632,23634,23636,23638,23640,23642,23644,23646,23648,23650,23652,23654,23656,23658,23660,23662,23664,23666,23668,23670,23672,23674,23676,23678,23680,23682,23684,23686,23688,23690,23692,23694,23696,23698,23700,23702,23704,23706,23708,23710,23712,23714,23716,23718,23720,23722,23724,23726,23728,23730,23732,23734,23736,23738,23740,23742,23744,23746,23748,23750,23752,23754,23756,23758,23760,23762,23764,23766,23768,23770,23772,23774,23776,23778,23780,23782,23784,23786,23788,23790,23792,23794,23796,23798,23800,23802,23804,23806,23808,23810,23812,23814,23816,23818,23820,23822,23824,23826,23828,23830,23832,23834,23836,23838,23840,23842,23844,23846,23848,23850,23852,23854,23856,23858,23860,23862,23864,23866,23868,23870,23872,23874,23876,23878,23880,23882,23884,23886,23888,23890,23892,23894,23896,23898,23900,23902,23904,23906,23908,23910,23912,23914,23916,23918,23920,23922,23924,23926,23928,23930,23932,23934,23936,23938,23940,23942,23944,23946,23948,23950,23952,23954,23956,23958,23960,23962,23964,23966,23968,23970,23972,23974,23976,23978,23980,23982,23984,23986,23988,23990,23992,23994,23996,23998,24000,24002,24004,24006,24008,24010,24012,24014,24016,24018,24020,24022,24024,24026,24028,24030,24032,24034,24036,24038,24040,24042,24044,24046,24048,24050,24052,24054,24056,24058,24060,24062,24064,24066,24068,24070,24072,24074,24076,24078,24080,24082,24084,24086,24088,24090,24092,24094,24096,24098,24100,24102,24104,24106,24108,24110,24112,24114,24116,24118,24120,24122,24124,24126,24128,24130,24132,24134,24136,24138,24140,24142,24144,24146,24148,24150,24152,24154,24156,24158,24160,24162,24164,24166,24168,24170,24172,24174,24176,24178,24180,24182,24184,24186,24188,24190,24192,24194,24196,24198,24200,24202,24204,24206,24208,24210,24212,24214,24216,24218,24220,24222,24224,24226,24228,24230,24232,24234,24236,24238,24240,24242,24244,24246,24248,24250,24252,24254,24256,24258,24260,24262,24264,24266,24268,24270,24272,24274,24276,24278,24280,24282,24284,24286,24288,24290,24292,24294,24296,24298,24300,24302,24304,24306,24308,24310,24312,24314,24316,24318,24320,24322,24324,24326,24328,24330,24332,24334,24336,24338,24340,24342,24344,24346,24348,24350,24352,24354,24356,24358,24360,24362,24364,24366
,24368,24370,24372,24374,24376,24378,24380,24382,24384,24386,24388,24390,24392,24394,24396,24398,24400,24402,24404,24406,24408,24410,24412,24414,24416,24418,24420,24422,24424,24426,24428,24430,24432,24434,24436,24438,24440,24442,24444,24446,24448,24450,24452,24454,24456,24458,24460,24462,24464,24466,24468,24470,24472,24474,24476,24478,24480,24482,24484,24486,24488,24490,24492,24494,24496,24498,24500,24502,24504,24506,24508,24510,24512,24514,24516,24518,24520,24522,24524,24526,24528,24530,24532,24534,24536,24538,24540,24542,24544,24546,24548,24550,24552,24554,24556,24558,24560,24562,24564,24566,24568,24570,24572,24574,24576,24578,24580,24582,24584,24586,24588,24590,24592,24594,24596,24598,24600,24602,24604,24606,24608,24610,24612,24614,24616,24618,24620,24622,24624,24626,24628,24630,24632,24634,24636,24638,24640,24642,24644,24646,24648,24650,24652,24654,24656,24658,24660,24662,24664,24666,24668,24670,24672,24674,24676,24678,24680,24682,24684,24686,24688,24690,24692,24694,24696,24698,24700,24702,24704,24706,24708,24710,24712,24714,24716,24718,24720,24722,24724,24726,24728,24730,24732,24734,24736,24738,24740,24742,24744,24746,24748,24750,24752,24754,24756,24758,24760,24762,24764,24766,24768,24770,24772,24774,24776,24778,24780,24782,24784,24786,24788,24790,24792,24794,24796,24798,24800,24802,24804,24806,24808,24810,24812,24814,24816,24818,24820,24822,24824,24826,24828,24830,24832,24834,24836,24838,24840,24842,24844,24846,24848,24850,24852,24854,24856,24858,24860,24862,24864,24866,24868,24870,24872,24874,24876,24878,24880,24882,24884,24886,24888,24890,24892,24894,24896,24898,24900,24902,24904,24906,24908,24910,24912,24914,24916,24918,24920,24922,24924,24926,24928,24930,24932,24934,24936,24938,24940,24942,24944,24946,24948,24950,24952,24954,24956,24958,24960,24962,24964,24966,24968,24970,24972,24974,24976,24978,24980,24982,24984,24986,24988,24990,24992,24994,24996,24998,25000,25002,25004,25006,25008,25010,25012,25014,25016,25018,25020,25022,25024,25026,25028,25030,25032,25034,25036,25038,25040,25042,25044,25046,25048,25050,25052,25054,25056,25058,25060,25062,25064,25066,25068,25070,25072,25074,25076,25078,25080,25082,25084,25086,25088,25090,25092,25094,25096,25098,25100,25102,25104,25106,25108,25110,25112,25114,25116,25118,25120,25122,25124,25126,25128,25130,25132,25134,25136,25138,25140,25142,25144,25146,25148,25150,25152,25154,25156,25158,25160,25162,25164,25166,25168,25170,25172,25174,25176,25178,25180,25182,25184,25186,25188,25190,25192,25194,25196]
,16021
)
assert result3 == [8010, 8011]
print("OK")
| 2822.6
| 70040
| 0.817941
|
from typing import List
def twoSum(nums: List[int], target: int) -> List[int]:
    length = len(nums)
    for i, v1 in enumerate(nums):
        sliced = nums[i + 1: length]
        for j, v2 in enumerate(sliced):
            result = v1 + v2
            if result == target:
                return [i, i + j + 1]
    return []
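# The brute-force scan above is O(n^2). For reference only, a minimal O(n)
# hash-map sketch follows; the name twoSumHashed and this variant are
# illustrative additions (assumptions), not part of the original snippet.
def twoSumHashed(nums: List[int], target: int) -> List[int]:
    # Map each seen value to its index and look up the complement as we go.
    seen = {}
    for i, v in enumerate(nums):
        if target - v in seen:
            return [seen[target - v], i]
        seen[v] = i
    return []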
result = twoSum([6, 1, 4, 5], 7)
assert result == [0, 1]
result2 = twoSum([2, 8, 4, 5], 13)
assert result2 == [1, 3]
result3 = twoSum(
[0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110,112,114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,146,148,150,152,154,156,158,160,162,164,166,168,170,172,174,176,178,180,182,184,186,188,190,192,194,196,198,200,202,204,206,208,210,212,214,216,218,220,222,224,226,228,230,232,234,236,238,240,242,244,246,248,250,252,254,256,258,260,262,264,266,268,270,272,274,276,278,280,282,284,286,288,290,292,294,296,298,300,302,304,306,308,310,312,314,316,318,320,322,324,326,328,330,332,334,336,338,340,342,344,346,348,350,352,354,356,358,360,362,364,366,368,370,372,374,376,378,380,382,384,386,388,390,392,394,396,398,400,402,404,406,408,410,412,414,416,418,420,422,424,426,428,430,432,434,436,438,440,442,444,446,448,450,452,454,456,458,460,462,464,466,468,470,472,474,476,478,480,482,484,486,488,490,492,494,496,498,500,502,504,506,508,510,512,514,516,518,520,522,524,526,528,530,532,534,536,538,540,542,544,546,548,550,552,554,556,558,560,562,564,566,568,570,572,574,576,578,580,582,584,586,588,590,592,594,596,598,600,602,604,606,608,610,612,614,616,618,620,622,624,626,628,630,632,634,636,638,640,642,644,646,648,650,652,654,656,658,660,662,664,666,668,670,672,674,676,678,680,682,684,686,688,690,692,694,696,698,700,702,704,706,708,710,712,714,716,718,720,722,724,726,728,730,732,734,736,738,740,742,744,746,748,750,752,754,756,758,760,762,764,766,768,770,772,774,776,778,780,782,784,786,788,790,792,794,796,798,800,802,804,806,808,810,812,814,816,818,820,822,824,826,828,830,832,834,836,838,840,842,844,846,848,850,852,854,856,858,860,862,864,866,868,870,872,874,876,878,880,882,884,886,888,890,892,894,896,898,900,902,904,906,908,910,912,914,916,918,920,922,924,926,928,930,932,934,936,938,940,942,944,946,948,950,952,954,956,958,960,962,964,966,968,970,972,974,976,978,980,982,984,986,988,990,992,994,996,998,1000,1002,1004,1006,1008,1010,1012,1014,1016,1018,1020,1022,1024,1026,1028,1030,1032,1034,1036,1038,1040,1042,1044,1046,1048,1050,1052,1054,1056,1058,1060,1062,1064,1066,1068,1070,1072,1074,1076,1078,1080,1082,1084,1086,1088,1090,1092,1094,1096,1098,1100,1102,1104,1106,1108,1110,1112,1114,1116,1118,1120,1122,1124,1126,1128,1130,1132,1134,1136,1138,1140,1142,1144,1146,1148,1150,1152,1154,1156,1158,1160,1162,1164,1166,1168,1170,1172,1174,1176,1178,1180,1182,1184,1186,1188,1190,1192,1194,1196,1198,1200,1202,1204,1206,1208,1210,1212,1214,1216,1218,1220,1222,1224,1226,1228,1230,1232,1234,1236,1238,1240,1242,1244,1246,1248,1250,1252,1254,1256,1258,1260,1262,1264,1266,1268,1270,1272,1274,1276,1278,1280,1282,1284,1286,1288,1290,1292,1294,1296,1298,1300,1302,1304,1306,1308,1310,1312,1314,1316,1318,1320,1322,1324,1326,1328,1330,1332,1334,1336,1338,1340,1342,1344,1346,1348,1350,1352,1354,1356,1358,1360,1362,1364,1366,1368,1370,1372,1374,1376,1378,1380,1382,1384,1386,1388,1390,1392,1394,1396,1398,1400,1402,1404,1406,1408,1410,1412,1414,1416,1418,1420,1422,1424,1426,1428,1430,1432,1434,1436,1438,1440,1442,1444,1446,1448,1450,1452,1454,1456,1458,1460,1462,1464,1466,1468,1470,1472,1474,1476,1478,1480,1482,1484,1486,1488,1490,1492,1494,1496,1498,1500,1502,1504,1506,1508,1510,1512,1514,1516,1518,1520,1522,1524,1526,1528,1530,1532,1534,1536,1538,1540,1542,1544,1546,1548,1550,1552,1554,1556,1558,1560,1562,1564,1566,1568,1570,1572,1574,1576,1578,1580,1582,1584,1586,1588,1590,1592,1594,1596,1598,1600,1602,1604,1606,1608,1610,1612,1614,1616,1618,1620,1622,1624,1626,1628,1630,1632,1634,1636,1638,1640,1642
,1644,1646,1648,1650,1652,1654,1656,1658,1660,1662,1664,1666,1668,1670,1672,1674,1676,1678,1680,1682,1684,1686,1688,1690,1692,1694,1696,1698,1700,1702,1704,1706,1708,1710,1712,1714,1716,1718,1720,1722,1724,1726,1728,1730,1732,1734,1736,1738,1740,1742,1744,1746,1748,1750,1752,1754,1756,1758,1760,1762,1764,1766,1768,1770,1772,1774,1776,1778,1780,1782,1784,1786,1788,1790,1792,1794,1796,1798,1800,1802,1804,1806,1808,1810,1812,1814,1816,1818,1820,1822,1824,1826,1828,1830,1832,1834,1836,1838,1840,1842,1844,1846,1848,1850,1852,1854,1856,1858,1860,1862,1864,1866,1868,1870,1872,1874,1876,1878,1880,1882,1884,1886,1888,1890,1892,1894,1896,1898,1900,1902,1904,1906,1908,1910,1912,1914,1916,1918,1920,1922,1924,1926,1928,1930,1932,1934,1936,1938,1940,1942,1944,1946,1948,1950,1952,1954,1956,1958,1960,1962,1964,1966,1968,1970,1972,1974,1976,1978,1980,1982,1984,1986,1988,1990,1992,1994,1996,1998,2000,2002,2004,2006,2008,2010,2012,2014,2016,2018,2020,2022,2024,2026,2028,2030,2032,2034,2036,2038,2040,2042,2044,2046,2048,2050,2052,2054,2056,2058,2060,2062,2064,2066,2068,2070,2072,2074,2076,2078,2080,2082,2084,2086,2088,2090,2092,2094,2096,2098,2100,2102,2104,2106,2108,2110,2112,2114,2116,2118,2120,2122,2124,2126,2128,2130,2132,2134,2136,2138,2140,2142,2144,2146,2148,2150,2152,2154,2156,2158,2160,2162,2164,2166,2168,2170,2172,2174,2176,2178,2180,2182,2184,2186,2188,2190,2192,2194,2196,2198,2200,2202,2204,2206,2208,2210,2212,2214,2216,2218,2220,2222,2224,2226,2228,2230,2232,2234,2236,2238,2240,2242,2244,2246,2248,2250,2252,2254,2256,2258,2260,2262,2264,2266,2268,2270,2272,2274,2276,2278,2280,2282,2284,2286,2288,2290,2292,2294,2296,2298,2300,2302,2304,2306,2308,2310,2312,2314,2316,2318,2320,2322,2324,2326,2328,2330,2332,2334,2336,2338,2340,2342,2344,2346,2348,2350,2352,2354,2356,2358,2360,2362,2364,2366,2368,2370,2372,2374,2376,2378,2380,2382,2384,2386,2388,2390,2392,2394,2396,2398,2400,2402,2404,2406,2408,2410,2412,2414,2416,2418,2420,2422,2424,2426,2428,2430,2432,2434,2436,2438,2440,2442,2444,2446,2448,2450,2452,2454,2456,2458,2460,2462,2464,2466,2468,2470,2472,2474,2476,2478,2480,2482,2484,2486,2488,2490,2492,2494,2496,2498,2500,2502,2504,2506,2508,2510,2512,2514,2516,2518,2520,2522,2524,2526,2528,2530,2532,2534,2536,2538,2540,2542,2544,2546,2548,2550,2552,2554,2556,2558,2560,2562,2564,2566,2568,2570,2572,2574,2576,2578,2580,2582,2584,2586,2588,2590,2592,2594,2596,2598,2600,2602,2604,2606,2608,2610,2612,2614,2616,2618,2620,2622,2624,2626,2628,2630,2632,2634,2636,2638,2640,2642,2644,2646,2648,2650,2652,2654,2656,2658,2660,2662,2664,2666,2668,2670,2672,2674,2676,2678,2680,2682,2684,2686,2688,2690,2692,2694,2696,2698,2700,2702,2704,2706,2708,2710,2712,2714,2716,2718,2720,2722,2724,2726,2728,2730,2732,2734,2736,2738,2740,2742,2744,2746,2748,2750,2752,2754,2756,2758,2760,2762,2764,2766,2768,2770,2772,2774,2776,2778,2780,2782,2784,2786,2788,2790,2792,2794,2796,2798,2800,2802,2804,2806,2808,2810,2812,2814,2816,2818,2820,2822,2824,2826,2828,2830,2832,2834,2836,2838,2840,2842,2844,2846,2848,2850,2852,2854,2856,2858,2860,2862,2864,2866,2868,2870,2872,2874,2876,2878,2880,2882,2884,2886,2888,2890,2892,2894,2896,2898,2900,2902,2904,2906,2908,2910,2912,2914,2916,2918,2920,2922,2924,2926,2928,2930,2932,2934,2936,2938,2940,2942,2944,2946,2948,2950,2952,2954,2956,2958,2960,2962,2964,2966,2968,2970,2972,2974,2976,2978,2980,2982,2984,2986,2988,2990,2992,2994,2996,2998,3000,3002,3004,3006,3008,3010,3012,3014,3016,3018,3020,3022,3024,3026,3028,3030,3032,3034,3036,3038,3040,3042,3044,3046,3048,3050,3052,3054,3056,3058,3060,3062,3064
,3066,3068,3070,3072,3074,3076,3078,3080,3082,3084,3086,3088,3090,3092,3094,3096,3098,3100,3102,3104,3106,3108,3110,3112,3114,3116,3118,3120,3122,3124,3126,3128,3130,3132,3134,3136,3138,3140,3142,3144,3146,3148,3150,3152,3154,3156,3158,3160,3162,3164,3166,3168,3170,3172,3174,3176,3178,3180,3182,3184,3186,3188,3190,3192,3194,3196,3198,3200,3202,3204,3206,3208,3210,3212,3214,3216,3218,3220,3222,3224,3226,3228,3230,3232,3234,3236,3238,3240,3242,3244,3246,3248,3250,3252,3254,3256,3258,3260,3262,3264,3266,3268,3270,3272,3274,3276,3278,3280,3282,3284,3286,3288,3290,3292,3294,3296,3298,3300,3302,3304,3306,3308,3310,3312,3314,3316,3318,3320,3322,3324,3326,3328,3330,3332,3334,3336,3338,3340,3342,3344,3346,3348,3350,3352,3354,3356,3358,3360,3362,3364,3366,3368,3370,3372,3374,3376,3378,3380,3382,3384,3386,3388,3390,3392,3394,3396,3398,3400,3402,3404,3406,3408,3410,3412,3414,3416,3418,3420,3422,3424,3426,3428,3430,3432,3434,3436,3438,3440,3442,3444,3446,3448,3450,3452,3454,3456,3458,3460,3462,3464,3466,3468,3470,3472,3474,3476,3478,3480,3482,3484,3486,3488,3490,3492,3494,3496,3498,3500,3502,3504,3506,3508,3510,3512,3514,3516,3518,3520,3522,3524,3526,3528,3530,3532,3534,3536,3538,3540,3542,3544,3546,3548,3550,3552,3554,3556,3558,3560,3562,3564,3566,3568,3570,3572,3574,3576,3578,3580,3582,3584,3586,3588,3590,3592,3594,3596,3598,3600,3602,3604,3606,3608,3610,3612,3614,3616,3618,3620,3622,3624,3626,3628,3630,3632,3634,3636,3638,3640,3642,3644,3646,3648,3650,3652,3654,3656,3658,3660,3662,3664,3666,3668,3670,3672,3674,3676,3678,3680,3682,3684,3686,3688,3690,3692,3694,3696,3698,3700,3702,3704,3706,3708,3710,3712,3714,3716,3718,3720,3722,3724,3726,3728,3730,3732,3734,3736,3738,3740,3742,3744,3746,3748,3750,3752,3754,3756,3758,3760,3762,3764,3766,3768,3770,3772,3774,3776,3778,3780,3782,3784,3786,3788,3790,3792,3794,3796,3798,3800,3802,3804,3806,3808,3810,3812,3814,3816,3818,3820,3822,3824,3826,3828,3830,3832,3834,3836,3838,3840,3842,3844,3846,3848,3850,3852,3854,3856,3858,3860,3862,3864,3866,3868,3870,3872,3874,3876,3878,3880,3882,3884,3886,3888,3890,3892,3894,3896,3898,3900,3902,3904,3906,3908,3910,3912,3914,3916,3918,3920,3922,3924,3926,3928,3930,3932,3934,3936,3938,3940,3942,3944,3946,3948,3950,3952,3954,3956,3958,3960,3962,3964,3966,3968,3970,3972,3974,3976,3978,3980,3982,3984,3986,3988,3990,3992,3994,3996,3998,4000,4002,4004,4006,4008,4010,4012,4014,4016,4018,4020,4022,4024,4026,4028,4030,4032,4034,4036,4038,4040,4042,4044,4046,4048,4050,4052,4054,4056,4058,4060,4062,4064,4066,4068,4070,4072,4074,4076,4078,4080,4082,4084,4086,4088,4090,4092,4094,4096,4098,4100,4102,4104,4106,4108,4110,4112,4114,4116,4118,4120,4122,4124,4126,4128,4130,4132,4134,4136,4138,4140,4142,4144,4146,4148,4150,4152,4154,4156,4158,4160,4162,4164,4166,4168,4170,4172,4174,4176,4178,4180,4182,4184,4186,4188,4190,4192,4194,4196,4198,4200,4202,4204,4206,4208,4210,4212,4214,4216,4218,4220,4222,4224,4226,4228,4230,4232,4234,4236,4238,4240,4242,4244,4246,4248,4250,4252,4254,4256,4258,4260,4262,4264,4266,4268,4270,4272,4274,4276,4278,4280,4282,4284,4286,4288,4290,4292,4294,4296,4298,4300,4302,4304,4306,4308,4310,4312,4314,4316,4318,4320,4322,4324,4326,4328,4330,4332,4334,4336,4338,4340,4342,4344,4346,4348,4350,4352,4354,4356,4358,4360,4362,4364,4366,4368,4370,4372,4374,4376,4378,4380,4382,4384,4386,4388,4390,4392,4394,4396,4398,4400,4402,4404,4406,4408,4410,4412,4414,4416,4418,4420,4422,4424,4426,4428,4430,4432,4434,4436,4438,4440,4442,4444,4446,4448,4450,4452,4454,4456,4458,4460,4462,4464,4466,4468,4470,4472,4474,4476,4478,4480,4482,4484,4486
,4488,4490,4492,4494,4496,4498,4500,4502,4504,4506,4508,4510,4512,4514,4516,4518,4520,4522,4524,4526,4528,4530,4532,4534,4536,4538,4540,4542,4544,4546,4548,4550,4552,4554,4556,4558,4560,4562,4564,4566,4568,4570,4572,4574,4576,4578,4580,4582,4584,4586,4588,4590,4592,4594,4596,4598,4600,4602,4604,4606,4608,4610,4612,4614,4616,4618,4620,4622,4624,4626,4628,4630,4632,4634,4636,4638,4640,4642,4644,4646,4648,4650,4652,4654,4656,4658,4660,4662,4664,4666,4668,4670,4672,4674,4676,4678,4680,4682,4684,4686,4688,4690,4692,4694,4696,4698,4700,4702,4704,4706,4708,4710,4712,4714,4716,4718,4720,4722,4724,4726,4728,4730,4732,4734,4736,4738,4740,4742,4744,4746,4748,4750,4752,4754,4756,4758,4760,4762,4764,4766,4768,4770,4772,4774,4776,4778,4780,4782,4784,4786,4788,4790,4792,4794,4796,4798,4800,4802,4804,4806,4808,4810,4812,4814,4816,4818,4820,4822,4824,4826,4828,4830,4832,4834,4836,4838,4840,4842,4844,4846,4848,4850,4852,4854,4856,4858,4860,4862,4864,4866,4868,4870,4872,4874,4876,4878,4880,4882,4884,4886,4888,4890,4892,4894,4896,4898,4900,4902,4904,4906,4908,4910,4912,4914,4916,4918,4920,4922,4924,4926,4928,4930,4932,4934,4936,4938,4940,4942,4944,4946,4948,4950,4952,4954,4956,4958,4960,4962,4964,4966,4968,4970,4972,4974,4976,4978,4980,4982,4984,4986,4988,4990,4992,4994,4996,4998,5000,5002,5004,5006,5008,5010,5012,5014,5016,5018,5020,5022,5024,5026,5028,5030,5032,5034,5036,5038,5040,5042,5044,5046,5048,5050,5052,5054,5056,5058,5060,5062,5064,5066,5068,5070,5072,5074,5076,5078,5080,5082,5084,5086,5088,5090,5092,5094,5096,5098,5100,5102,5104,5106,5108,5110,5112,5114,5116,5118,5120,5122,5124,5126,5128,5130,5132,5134,5136,5138,5140,5142,5144,5146,5148,5150,5152,5154,5156,5158,5160,5162,5164,5166,5168,5170,5172,5174,5176,5178,5180,5182,5184,5186,5188,5190,5192,5194,5196,5198,5200,5202,5204,5206,5208,5210,5212,5214,5216,5218,5220,5222,5224,5226,5228,5230,5232,5234,5236,5238,5240,5242,5244,5246,5248,5250,5252,5254,5256,5258,5260,5262,5264,5266,5268,5270,5272,5274,5276,5278,5280,5282,5284,5286,5288,5290,5292,5294,5296,5298,5300,5302,5304,5306,5308,5310,5312,5314,5316,5318,5320,5322,5324,5326,5328,5330,5332,5334,5336,5338,5340,5342,5344,5346,5348,5350,5352,5354,5356,5358,5360,5362,5364,5366,5368,5370,5372,5374,5376,5378,5380,5382,5384,5386,5388,5390,5392,5394,5396,5398,5400,5402,5404,5406,5408,5410,5412,5414,5416,5418,5420,5422,5424,5426,5428,5430,5432,5434,5436,5438,5440,5442,5444,5446,5448,5450,5452,5454,5456,5458,5460,5462,5464,5466,5468,5470,5472,5474,5476,5478,5480,5482,5484,5486,5488,5490,5492,5494,5496,5498,5500,5502,5504,5506,5508,5510,5512,5514,5516,5518,5520,5522,5524,5526,5528,5530,5532,5534,5536,5538,5540,5542,5544,5546,5548,5550,5552,5554,5556,5558,5560,5562,5564,5566,5568,5570,5572,5574,5576,5578,5580,5582,5584,5586,5588,5590,5592,5594,5596,5598,5600,5602,5604,5606,5608,5610,5612,5614,5616,5618,5620,5622,5624,5626,5628,5630,5632,5634,5636,5638,5640,5642,5644,5646,5648,5650,5652,5654,5656,5658,5660,5662,5664,5666,5668,5670,5672,5674,5676,5678,5680,5682,5684,5686,5688,5690,5692,5694,5696,5698,5700,5702,5704,5706,5708,5710,5712,5714,5716,5718,5720,5722,5724,5726,5728,5730,5732,5734,5736,5738,5740,5742,5744,5746,5748,5750,5752,5754,5756,5758,5760,5762,5764,5766,5768,5770,5772,5774,5776,5778,5780,5782,5784,5786,5788,5790,5792,5794,5796,5798,5800,5802,5804,5806,5808,5810,5812,5814,5816,5818,5820,5822,5824,5826,5828,5830,5832,5834,5836,5838,5840,5842,5844,5846,5848,5850,5852,5854,5856,5858,5860,5862,5864,5866,5868,5870,5872,5874,5876,5878,5880,5882,5884,5886,5888,5890,5892,5894,5896,5898,5900,5902,5904,5906,5908
,5910,5912,5914,5916,5918,5920,5922,5924,5926,5928,5930,5932,5934,5936,5938,5940,5942,5944,5946,5948,5950,5952,5954,5956,5958,5960,5962,5964,5966,5968,5970,5972,5974,5976,5978,5980,5982,5984,5986,5988,5990,5992,5994,5996,5998,6000,6002,6004,6006,6008,6010,6012,6014,6016,6018,6020,6022,6024,6026,6028,6030,6032,6034,6036,6038,6040,6042,6044,6046,6048,6050,6052,6054,6056,6058,6060,6062,6064,6066,6068,6070,6072,6074,6076,6078,6080,6082,6084,6086,6088,6090,6092,6094,6096,6098,6100,6102,6104,6106,6108,6110,6112,6114,6116,6118,6120,6122,6124,6126,6128,6130,6132,6134,6136,6138,6140,6142,6144,6146,6148,6150,6152,6154,6156,6158,6160,6162,6164,6166,6168,6170,6172,6174,6176,6178,6180,6182,6184,6186,6188,6190,6192,6194,6196,6198,6200,6202,6204,6206,6208,6210,6212,6214,6216,6218,6220,6222,6224,6226,6228,6230,6232,6234,6236,6238,6240,6242,6244,6246,6248,6250,6252,6254,6256,6258,6260,6262,6264,6266,6268,6270,6272,6274,6276,6278,6280,6282,6284,6286,6288,6290,6292,6294,6296,6298,6300,6302,6304,6306,6308,6310,6312,6314,6316,6318,6320,6322,6324,6326,6328,6330,6332,6334,6336,6338,6340,6342,6344,6346,6348,6350,6352,6354,6356,6358,6360,6362,6364,6366,6368,6370,6372,6374,6376,6378,6380,6382,6384,6386,6388,6390,6392,6394,6396,6398,6400,6402,6404,6406,6408,6410,6412,6414,6416,6418,6420,6422,6424,6426,6428,6430,6432,6434,6436,6438,6440,6442,6444,6446,6448,6450,6452,6454,6456,6458,6460,6462,6464,6466,6468,6470,6472,6474,6476,6478,6480,6482,6484,6486,6488,6490,6492,6494,6496,6498,6500,6502,6504,6506,6508,6510,6512,6514,6516,6518,6520,6522,6524,6526,6528,6530,6532,6534,6536,6538,6540,6542,6544,6546,6548,6550,6552,6554,6556,6558,6560,6562,6564,6566,6568,6570,6572,6574,6576,6578,6580,6582,6584,6586,6588,6590,6592,6594,6596,6598,6600,6602,6604,6606,6608,6610,6612,6614,6616,6618,6620,6622,6624,6626,6628,6630,6632,6634,6636,6638,6640,6642,6644,6646,6648,6650,6652,6654,6656,6658,6660,6662,6664,6666,6668,6670,6672,6674,6676,6678,6680,6682,6684,6686,6688,6690,6692,6694,6696,6698,6700,6702,6704,6706,6708,6710,6712,6714,6716,6718,6720,6722,6724,6726,6728,6730,6732,6734,6736,6738,6740,6742,6744,6746,6748,6750,6752,6754,6756,6758,6760,6762,6764,6766,6768,6770,6772,6774,6776,6778,6780,6782,6784,6786,6788,6790,6792,6794,6796,6798,6800,6802,6804,6806,6808,6810,6812,6814,6816,6818,6820,6822,6824,6826,6828,6830,6832,6834,6836,6838,6840,6842,6844,6846,6848,6850,6852,6854,6856,6858,6860,6862,6864,6866,6868,6870,6872,6874,6876,6878,6880,6882,6884,6886,6888,6890,6892,6894,6896,6898,6900,6902,6904,6906,6908,6910,6912,6914,6916,6918,6920,6922,6924,6926,6928,6930,6932,6934,6936,6938,6940,6942,6944,6946,6948,6950,6952,6954,6956,6958,6960,6962,6964,6966,6968,6970,6972,6974,6976,6978,6980,6982,6984,6986,6988,6990,6992,6994,6996,6998,7000,7002,7004,7006,7008,7010,7012,7014,7016,7018,7020,7022,7024,7026,7028,7030,7032,7034,7036,7038,7040,7042,7044,7046,7048,7050,7052,7054,7056,7058,7060,7062,7064,7066,7068,7070,7072,7074,7076,7078,7080,7082,7084,7086,7088,7090,7092,7094,7096,7098,7100,7102,7104,7106,7108,7110,7112,7114,7116,7118,7120,7122,7124,7126,7128,7130,7132,7134,7136,7138,7140,7142,7144,7146,7148,7150,7152,7154,7156,7158,7160,7162,7164,7166,7168,7170,7172,7174,7176,7178,7180,7182,7184,7186,7188,7190,7192,7194,7196,7198,7200,7202,7204,7206,7208,7210,7212,7214,7216,7218,7220,7222,7224,7226,7228,7230,7232,7234,7236,7238,7240,7242,7244,7246,7248,7250,7252,7254,7256,7258,7260,7262,7264,7266,7268,7270,7272,7274,7276,7278,7280,7282,7284,7286,7288,7290,7292,7294,7296,7298,7300,7302,7304,7306,7308,7310,7312,7314,7316,7318,7320,7322,7324,7326,7328,7330
,7332,7334,7336,7338,7340,7342,7344,7346,7348,7350,7352,7354,7356,7358,7360,7362,7364,7366,7368,7370,7372,7374,7376,7378,7380,7382,7384,7386,7388,7390,7392,7394,7396,7398,7400,7402,7404,7406,7408,7410,7412,7414,7416,7418,7420,7422,7424,7426,7428,7430,7432,7434,7436,7438,7440,7442,7444,7446,7448,7450,7452,7454,7456,7458,7460,7462,7464,7466,7468,7470,7472,7474,7476,7478,7480,7482,7484,7486,7488,7490,7492,7494,7496,7498,7500,7502,7504,7506,7508,7510,7512,7514,7516,7518,7520,7522,7524,7526,7528,7530,7532,7534,7536,7538,7540,7542,7544,7546,7548,7550,7552,7554,7556,7558,7560,7562,7564,7566,7568,7570,7572,7574,7576,7578,7580,7582,7584,7586,7588,7590,7592,7594,7596,7598,7600,7602,7604,7606,7608,7610,7612,7614,7616,7618,7620,7622,7624,7626,7628,7630,7632,7634,7636,7638,7640,7642,7644,7646,7648,7650,7652,7654,7656,7658,7660,7662,7664,7666,7668,7670,7672,7674,7676,7678,7680,7682,7684,7686,7688,7690,7692,7694,7696,7698,7700,7702,7704,7706,7708,7710,7712,7714,7716,7718,7720,7722,7724,7726,7728,7730,7732,7734,7736,7738,7740,7742,7744,7746,7748,7750,7752,7754,7756,7758,7760,7762,7764,7766,7768,7770,7772,7774,7776,7778,7780,7782,7784,7786,7788,7790,7792,7794,7796,7798,7800,7802,7804,7806,7808,7810,7812,7814,7816,7818,7820,7822,7824,7826,7828,7830,7832,7834,7836,7838,7840,7842,7844,7846,7848,7850,7852,7854,7856,7858,7860,7862,7864,7866,7868,7870,7872,7874,7876,7878,7880,7882,7884,7886,7888,7890,7892,7894,7896,7898,7900,7902,7904,7906,7908,7910,7912,7914,7916,7918,7920,7922,7924,7926,7928,7930,7932,7934,7936,7938,7940,7942,7944,7946,7948,7950,7952,7954,7956,7958,7960,7962,7964,7966,7968,7970,7972,7974,7976,7978,7980,7982,7984,7986,7988,7990,7992,7994,7996,7998,8000,8002,8004,8006,8008,8010,8012,8014,8016,8018,8020,8022,8024,8026,8028,8030,8032,8034,8036,8038,8040,8042,8044,8046,8048,8050,8052,8054,8056,8058,8060,8062,8064,8066,8068,8070,8072,8074,8076,8078,8080,8082,8084,8086,8088,8090,8092,8094,8096,8098,8100,8102,8104,8106,8108,8110,8112,8114,8116,8118,8120,8122,8124,8126,8128,8130,8132,8134,8136,8138,8140,8142,8144,8146,8148,8150,8152,8154,8156,8158,8160,8162,8164,8166,8168,8170,8172,8174,8176,8178,8180,8182,8184,8186,8188,8190,8192,8194,8196,8198,8200,8202,8204,8206,8208,8210,8212,8214,8216,8218,8220,8222,8224,8226,8228,8230,8232,8234,8236,8238,8240,8242,8244,8246,8248,8250,8252,8254,8256,8258,8260,8262,8264,8266,8268,8270,8272,8274,8276,8278,8280,8282,8284,8286,8288,8290,8292,8294,8296,8298,8300,8302,8304,8306,8308,8310,8312,8314,8316,8318,8320,8322,8324,8326,8328,8330,8332,8334,8336,8338,8340,8342,8344,8346,8348,8350,8352,8354,8356,8358,8360,8362,8364,8366,8368,8370,8372,8374,8376,8378,8380,8382,8384,8386,8388,8390,8392,8394,8396,8398,8400,8402,8404,8406,8408,8410,8412,8414,8416,8418,8420,8422,8424,8426,8428,8430,8432,8434,8436,8438,8440,8442,8444,8446,8448,8450,8452,8454,8456,8458,8460,8462,8464,8466,8468,8470,8472,8474,8476,8478,8480,8482,8484,8486,8488,8490,8492,8494,8496,8498,8500,8502,8504,8506,8508,8510,8512,8514,8516,8518,8520,8522,8524,8526,8528,8530,8532,8534,8536,8538,8540,8542,8544,8546,8548,8550,8552,8554,8556,8558,8560,8562,8564,8566,8568,8570,8572,8574,8576,8578,8580,8582,8584,8586,8588,8590,8592,8594,8596,8598,8600,8602,8604,8606,8608,8610,8612,8614,8616,8618,8620,8622,8624,8626,8628,8630,8632,8634,8636,8638,8640,8642,8644,8646,8648,8650,8652,8654,8656,8658,8660,8662,8664,8666,8668,8670,8672,8674,8676,8678,8680,8682,8684,8686,8688,8690,8692,8694,8696,8698,8700,8702,8704,8706,8708,8710,8712,8714,8716,8718,8720,8722,8724,8726,8728,8730,8732,8734,8736,8738,8740,8742,8744,8746,8748,8750,8752
,8754,8756,8758,8760,8762,8764,8766,8768,8770,8772,8774,8776,8778,8780,8782,8784,8786,8788,8790,8792,8794,8796,8798,8800,8802,8804,8806,8808,8810,8812,8814,8816,8818,8820,8822,8824,8826,8828,8830,8832,8834,8836,8838,8840,8842,8844,8846,8848,8850,8852,8854,8856,8858,8860,8862,8864,8866,8868,8870,8872,8874,8876,8878,8880,8882,8884,8886,8888,8890,8892,8894,8896,8898,8900,8902,8904,8906,8908,8910,8912,8914,8916,8918,8920,8922,8924,8926,8928,8930,8932,8934,8936,8938,8940,8942,8944,8946,8948,8950,8952,8954,8956,8958,8960,8962,8964,8966,8968,8970,8972,8974,8976,8978,8980,8982,8984,8986,8988,8990,8992,8994,8996,8998,9000,9002,9004,9006,9008,9010,9012,9014,9016,9018,9020,9022,9024,9026,9028,9030,9032,9034,9036,9038,9040,9042,9044,9046,9048,9050,9052,9054,9056,9058,9060,9062,9064,9066,9068,9070,9072,9074,9076,9078,9080,9082,9084,9086,9088,9090,9092,9094,9096,9098,9100,9102,9104,9106,9108,9110,9112,9114,9116,9118,9120,9122,9124,9126,9128,9130,9132,9134,9136,9138,9140,9142,9144,9146,9148,9150,9152,9154,9156,9158,9160,9162,9164,9166,9168,9170,9172,9174,9176,9178,9180,9182,9184,9186,9188,9190,9192,9194,9196,9198,9200,9202,9204,9206,9208,9210,9212,9214,9216,9218,9220,9222,9224,9226,9228,9230,9232,9234,9236,9238,9240,9242,9244,9246,9248,9250,9252,9254,9256,9258,9260,9262,9264,9266,9268,9270,9272,9274,9276,9278,9280,9282,9284,9286,9288,9290,9292,9294,9296,9298,9300,9302,9304,9306,9308,9310,9312,9314,9316,9318,9320,9322,9324,9326,9328,9330,9332,9334,9336,9338,9340,9342,9344,9346,9348,9350,9352,9354,9356,9358,9360,9362,9364,9366,9368,9370,9372,9374,9376,9378,9380,9382,9384,9386,9388,9390,9392,9394,9396,9398,9400,9402,9404,9406,9408,9410,9412,9414,9416,9418,9420,9422,9424,9426,9428,9430,9432,9434,9436,9438,9440,9442,9444,9446,9448,9450,9452,9454,9456,9458,9460,9462,9464,9466,9468,9470,9472,9474,9476,9478,9480,9482,9484,9486,9488,9490,9492,9494,9496,9498,9500,9502,9504,9506,9508,9510,9512,9514,9516,9518,9520,9522,9524,9526,9528,9530,9532,9534,9536,9538,9540,9542,9544,9546,9548,9550,9552,9554,9556,9558,9560,9562,9564,9566,9568,9570,9572,9574,9576,9578,9580,9582,9584,9586,9588,9590,9592,9594,9596,9598,9600,9602,9604,9606,9608,9610,9612,9614,9616,9618,9620,9622,9624,9626,9628,9630,9632,9634,9636,9638,9640,9642,9644,9646,9648,9650,9652,9654,9656,9658,9660,9662,9664,9666,9668,9670,9672,9674,9676,9678,9680,9682,9684,9686,9688,9690,9692,9694,9696,9698,9700,9702,9704,9706,9708,9710,9712,9714,9716,9718,9720,9722,9724,9726,9728,9730,9732,9734,9736,9738,9740,9742,9744,9746,9748,9750,9752,9754,9756,9758,9760,9762,9764,9766,9768,9770,9772,9774,9776,9778,9780,9782,9784,9786,9788,9790,9792,9794,9796,9798,9800,9802,9804,9806,9808,9810,9812,9814,9816,9818,9820,9822,9824,9826,9828,9830,9832,9834,9836,9838,9840,9842,9844,9846,9848,9850,9852,9854,9856,9858,9860,9862,9864,9866,9868,9870,9872,9874,9876,9878,9880,9882,9884,9886,9888,9890,9892,9894,9896,9898,9900,9902,9904,9906,9908,9910,9912,9914,9916,9918,9920,9922,9924,9926,9928,9930,9932,9934,9936,9938,9940,9942,9944,9946,9948,9950,9952,9954,9956,9958,9960,9962,9964,9966,9968,9970,9972,9974,9976,9978,9980,9982,9984,9986,9988,9990,9992,9994,9996,9998,10000,10002,10004,10006,10008,10010,10012,10014,10016,10018,10020,10022,10024,10026,10028,10030,10032,10034,10036,10038,10040,10042,10044,10046,10048,10050,10052,10054,10056,10058,10060,10062,10064,10066,10068,10070,10072,10074,10076,10078,10080,10082,10084,10086,10088,10090,10092,10094,10096,10098,10100,10102,10104,10106,10108,10110,10112,10114,10116,10118,10120,10122,10124,10126,10128,10130,10132,10134,10136,10138,10140,10142,10144,1
0146,10148,10150,10152,10154,10156,10158,10160,10162,10164,10166,10168,10170,10172,10174,10176,10178,10180,10182,10184,10186,10188,10190,10192,10194,10196,10198,10200,10202,10204,10206,10208,10210,10212,10214,10216,10218,10220,10222,10224,10226,10228,10230,10232,10234,10236,10238,10240,10242,10244,10246,10248,10250,10252,10254,10256,10258,10260,10262,10264,10266,10268,10270,10272,10274,10276,10278,10280,10282,10284,10286,10288,10290,10292,10294,10296,10298,10300,10302,10304,10306,10308,10310,10312,10314,10316,10318,10320,10322,10324,10326,10328,10330,10332,10334,10336,10338,10340,10342,10344,10346,10348,10350,10352,10354,10356,10358,10360,10362,10364,10366,10368,10370,10372,10374,10376,10378,10380,10382,10384,10386,10388,10390,10392,10394,10396,10398,10400,10402,10404,10406,10408,10410,10412,10414,10416,10418,10420,10422,10424,10426,10428,10430,10432,10434,10436,10438,10440,10442,10444,10446,10448,10450,10452,10454,10456,10458,10460,10462,10464,10466,10468,10470,10472,10474,10476,10478,10480,10482,10484,10486,10488,10490,10492,10494,10496,10498,10500,10502,10504,10506,10508,10510,10512,10514,10516,10518,10520,10522,10524,10526,10528,10530,10532,10534,10536,10538,10540,10542,10544,10546,10548,10550,10552,10554,10556,10558,10560,10562,10564,10566,10568,10570,10572,10574,10576,10578,10580,10582,10584,10586,10588,10590,10592,10594,10596,10598,10600,10602,10604,10606,10608,10610,10612,10614,10616,10618,10620,10622,10624,10626,10628,10630,10632,10634,10636,10638,10640,10642,10644,10646,10648,10650,10652,10654,10656,10658,10660,10662,10664,10666,10668,10670,10672,10674,10676,10678,10680,10682,10684,10686,10688,10690,10692,10694,10696,10698,10700,10702,10704,10706,10708,10710,10712,10714,10716,10718,10720,10722,10724,10726,10728,10730,10732,10734,10736,10738,10740,10742,10744,10746,10748,10750,10752,10754,10756,10758,10760,10762,10764,10766,10768,10770,10772,10774,10776,10778,10780,10782,10784,10786,10788,10790,10792,10794,10796,10798,10800,10802,10804,10806,10808,10810,10812,10814,10816,10818,10820,10822,10824,10826,10828,10830,10832,10834,10836,10838,10840,10842,10844,10846,10848,10850,10852,10854,10856,10858,10860,10862,10864,10866,10868,10870,10872,10874,10876,10878,10880,10882,10884,10886,10888,10890,10892,10894,10896,10898,10900,10902,10904,10906,10908,10910,10912,10914,10916,10918,10920,10922,10924,10926,10928,10930,10932,10934,10936,10938,10940,10942,10944,10946,10948,10950,10952,10954,10956,10958,10960,10962,10964,10966,10968,10970,10972,10974,10976,10978,10980,10982,10984,10986,10988,10990,10992,10994,10996,10998,11000,11002,11004,11006,11008,11010,11012,11014,11016,11018,11020,11022,11024,11026,11028,11030,11032,11034,11036,11038,11040,11042,11044,11046,11048,11050,11052,11054,11056,11058,11060,11062,11064,11066,11068,11070,11072,11074,11076,11078,11080,11082,11084,11086,11088,11090,11092,11094,11096,11098,11100,11102,11104,11106,11108,11110,11112,11114,11116,11118,11120,11122,11124,11126,11128,11130,11132,11134,11136,11138,11140,11142,11144,11146,11148,11150,11152,11154,11156,11158,11160,11162,11164,11166,11168,11170,11172,11174,11176,11178,11180,11182,11184,11186,11188,11190,11192,11194,11196,11198,11200,11202,11204,11206,11208,11210,11212,11214,11216,11218,11220,11222,11224,11226,11228,11230,11232,11234,11236,11238,11240,11242,11244,11246,11248,11250,11252,11254,11256,11258,11260,11262,11264,11266,11268,11270,11272,11274,11276,11278,11280,11282,11284,11286,11288,11290,11292,11294,11296,11298,11300,11302,11304,11306,11308,11310,11312,11314,11316,11318,11320,11322,11324,11326,11328,1133
0,11332,11334,11336,11338,11340,11342,11344,11346,11348,11350,11352,11354,11356,11358,11360,11362,11364,11366,11368,11370,11372,11374,11376,11378,11380,11382,11384,11386,11388,11390,11392,11394,11396,11398,11400,11402,11404,11406,11408,11410,11412,11414,11416,11418,11420,11422,11424,11426,11428,11430,11432,11434,11436,11438,11440,11442,11444,11446,11448,11450,11452,11454,11456,11458,11460,11462,11464,11466,11468,11470,11472,11474,11476,11478,11480,11482,11484,11486,11488,11490,11492,11494,11496,11498,11500,11502,11504,11506,11508,11510,11512,11514,11516,11518,11520,11522,11524,11526,11528,11530,11532,11534,11536,11538,11540,11542,11544,11546,11548,11550,11552,11554,11556,11558,11560,11562,11564,11566,11568,11570,11572,11574,11576,11578,11580,11582,11584,11586,11588,11590,11592,11594,11596,11598,11600,11602,11604,11606,11608,11610,11612,11614,11616,11618,11620,11622,11624,11626,11628,11630,11632,11634,11636,11638,11640,11642,11644,11646,11648,11650,11652,11654,11656,11658,11660,11662,11664,11666,11668,11670,11672,11674,11676,11678,11680,11682,11684,11686,11688,11690,11692,11694,11696,11698,11700,11702,11704,11706,11708,11710,11712,11714,11716,11718,11720,11722,11724,11726,11728,11730,11732,11734,11736,11738,11740,11742,11744,11746,11748,11750,11752,11754,11756,11758,11760,11762,11764,11766,11768,11770,11772,11774,11776,11778,11780,11782,11784,11786,11788,11790,11792,11794,11796,11798,11800,11802,11804,11806,11808,11810,11812,11814,11816,11818,11820,11822,11824,11826,11828,11830,11832,11834,11836,11838,11840,11842,11844,11846,11848,11850,11852,11854,11856,11858,11860,11862,11864,11866,11868,11870,11872,11874,11876,11878,11880,11882,11884,11886,11888,11890,11892,11894,11896,11898,11900,11902,11904,11906,11908,11910,11912,11914,11916,11918,11920,11922,11924,11926,11928,11930,11932,11934,11936,11938,11940,11942,11944,11946,11948,11950,11952,11954,11956,11958,11960,11962,11964,11966,11968,11970,11972,11974,11976,11978,11980,11982,11984,11986,11988,11990,11992,11994,11996,11998,12000,12002,12004,12006,12008,12010,12012,12014,12016,12018,12020,12022,12024,12026,12028,12030,12032,12034,12036,12038,12040,12042,12044,12046,12048,12050,12052,12054,12056,12058,12060,12062,12064,12066,12068,12070,12072,12074,12076,12078,12080,12082,12084,12086,12088,12090,12092,12094,12096,12098,12100,12102,12104,12106,12108,12110,12112,12114,12116,12118,12120,12122,12124,12126,12128,12130,12132,12134,12136,12138,12140,12142,12144,12146,12148,12150,12152,12154,12156,12158,12160,12162,12164,12166,12168,12170,12172,12174,12176,12178,12180,12182,12184,12186,12188,12190,12192,12194,12196,12198,12200,12202,12204,12206,12208,12210,12212,12214,12216,12218,12220,12222,12224,12226,12228,12230,12232,12234,12236,12238,12240,12242,12244,12246,12248,12250,12252,12254,12256,12258,12260,12262,12264,12266,12268,12270,12272,12274,12276,12278,12280,12282,12284,12286,12288,12290,12292,12294,12296,12298,12300,12302,12304,12306,12308,12310,12312,12314,12316,12318,12320,12322,12324,12326,12328,12330,12332,12334,12336,12338,12340,12342,12344,12346,12348,12350,12352,12354,12356,12358,12360,12362,12364,12366,12368,12370,12372,12374,12376,12378,12380,12382,12384,12386,12388,12390,12392,12394,12396,12398,12400,12402,12404,12406,12408,12410,12412,12414,12416,12418,12420,12422,12424,12426,12428,12430,12432,12434,12436,12438,12440,12442,12444,12446,12448,12450,12452,12454,12456,12458,12460,12462,12464,12466,12468,12470,12472,12474,12476,12478,12480,12482,12484,12486,12488,12490,12492,12494,12496,12498,12500,12502,12504,12506,12508,12510,12512,12514,1
2516,12518,12520,12522,12524,12526,12528,12530,12532,12534,12536,12538,12540,12542,12544,12546,12548,12550,12552,12554,12556,12558,12560,12562,12564,12566,12568,12570,12572,12574,12576,12578,12580,12582,12584,12586,12588,12590,12592,12594,12596,12598,12600,12602,12604,12606,12608,12610,12612,12614,12616,12618,12620,12622,12624,12626,12628,12630,12632,12634,12636,12638,12640,12642,12644,12646,12648,12650,12652,12654,12656,12658,12660,12662,12664,12666,12668,12670,12672,12674,12676,12678,12680,12682,12684,12686,12688,12690,12692,12694,12696,12698,12700,12702,12704,12706,12708,12710,12712,12714,12716,12718,12720,12722,12724,12726,12728,12730,12732,12734,12736,12738,12740,12742,12744,12746,12748,12750,12752,12754,12756,12758,12760,12762,12764,12766,12768,12770,12772,12774,12776,12778,12780,12782,12784,12786,12788,12790,12792,12794,12796,12798,12800,12802,12804,12806,12808,12810,12812,12814,12816,12818,12820,12822,12824,12826,12828,12830,12832,12834,12836,12838,12840,12842,12844,12846,12848,12850,12852,12854,12856,12858,12860,12862,12864,12866,12868,12870,12872,12874,12876,12878,12880,12882,12884,12886,12888,12890,12892,12894,12896,12898,12900,12902,12904,12906,12908,12910,12912,12914,12916,12918,12920,12922,12924,12926,12928,12930,12932,12934,12936,12938,12940,12942,12944,12946,12948,12950,12952,12954,12956,12958,12960,12962,12964,12966,12968,12970,12972,12974,12976,12978,12980,12982,12984,12986,12988,12990,12992,12994,12996,12998,13000,13002,13004,13006,13008,13010,13012,13014,13016,13018,13020,13022,13024,13026,13028,13030,13032,13034,13036,13038,13040,13042,13044,13046,13048,13050,13052,13054,13056,13058,13060,13062,13064,13066,13068,13070,13072,13074,13076,13078,13080,13082,13084,13086,13088,13090,13092,13094,13096,13098,13100,13102,13104,13106,13108,13110,13112,13114,13116,13118,13120,13122,13124,13126,13128,13130,13132,13134,13136,13138,13140,13142,13144,13146,13148,13150,13152,13154,13156,13158,13160,13162,13164,13166,13168,13170,13172,13174,13176,13178,13180,13182,13184,13186,13188,13190,13192,13194,13196,13198,13200,13202,13204,13206,13208,13210,13212,13214,13216,13218,13220,13222,13224,13226,13228,13230,13232,13234,13236,13238,13240,13242,13244,13246,13248,13250,13252,13254,13256,13258,13260,13262,13264,13266,13268,13270,13272,13274,13276,13278,13280,13282,13284,13286,13288,13290,13292,13294,13296,13298,13300,13302,13304,13306,13308,13310,13312,13314,13316,13318,13320,13322,13324,13326,13328,13330,13332,13334,13336,13338,13340,13342,13344,13346,13348,13350,13352,13354,13356,13358,13360,13362,13364,13366,13368,13370,13372,13374,13376,13378,13380,13382,13384,13386,13388,13390,13392,13394,13396,13398,13400,13402,13404,13406,13408,13410,13412,13414,13416,13418,13420,13422,13424,13426,13428,13430,13432,13434,13436,13438,13440,13442,13444,13446,13448,13450,13452,13454,13456,13458,13460,13462,13464,13466,13468,13470,13472,13474,13476,13478,13480,13482,13484,13486,13488,13490,13492,13494,13496,13498,13500,13502,13504,13506,13508,13510,13512,13514,13516,13518,13520,13522,13524,13526,13528,13530,13532,13534,13536,13538,13540,13542,13544,13546,13548,13550,13552,13554,13556,13558,13560,13562,13564,13566,13568,13570,13572,13574,13576,13578,13580,13582,13584,13586,13588,13590,13592,13594,13596,13598,13600,13602,13604,13606,13608,13610,13612,13614,13616,13618,13620,13622,13624,13626,13628,13630,13632,13634,13636,13638,13640,13642,13644,13646,13648,13650,13652,13654,13656,13658,13660,13662,13664,13666,13668,13670,13672,13674,13676,13678,13680,13682,13684,13686,13688,13690,13692,13694,13696,13698,1370
0,13702,13704,13706,13708,13710,13712,13714,13716,13718,13720,13722,13724,13726,13728,13730,13732,13734,13736,13738,13740,13742,13744,13746,13748,13750,13752,13754,13756,13758,13760,13762,13764,13766,13768,13770,13772,13774,13776,13778,13780,13782,13784,13786,13788,13790,13792,13794,13796,13798,13800,13802,13804,13806,13808,13810,13812,13814,13816,13818,13820,13822,13824,13826,13828,13830,13832,13834,13836,13838,13840,13842,13844,13846,13848,13850,13852,13854,13856,13858,13860,13862,13864,13866,13868,13870,13872,13874,13876,13878,13880,13882,13884,13886,13888,13890,13892,13894,13896,13898,13900,13902,13904,13906,13908,13910,13912,13914,13916,13918,13920,13922,13924,13926,13928,13930,13932,13934,13936,13938,13940,13942,13944,13946,13948,13950,13952,13954,13956,13958,13960,13962,13964,13966,13968,13970,13972,13974,13976,13978,13980,13982,13984,13986,13988,13990,13992,13994,13996,13998,14000,14002,14004,14006,14008,14010,14012,14014,14016,14018,14020,14022,14024,14026,14028,14030,14032,14034,14036,14038,14040,14042,14044,14046,14048,14050,14052,14054,14056,14058,14060,14062,14064,14066,14068,14070,14072,14074,14076,14078,14080,14082,14084,14086,14088,14090,14092,14094,14096,14098,14100,14102,14104,14106,14108,14110,14112,14114,14116,14118,14120,14122,14124,14126,14128,14130,14132,14134,14136,14138,14140,14142,14144,14146,14148,14150,14152,14154,14156,14158,14160,14162,14164,14166,14168,14170,14172,14174,14176,14178,14180,14182,14184,14186,14188,14190,14192,14194,14196,14198,14200,14202,14204,14206,14208,14210,14212,14214,14216,14218,14220,14222,14224,14226,14228,14230,14232,14234,14236,14238,14240,14242,14244,14246,14248,14250,14252,14254,14256,14258,14260,14262,14264,14266,14268,14270,14272,14274,14276,14278,14280,14282,14284,14286,14288,14290,14292,14294,14296,14298,14300,14302,14304,14306,14308,14310,14312,14314,14316,14318,14320,14322,14324,14326,14328,14330,14332,14334,14336,14338,14340,14342,14344,14346,14348,14350,14352,14354,14356,14358,14360,14362,14364,14366,14368,14370,14372,14374,14376,14378,14380,14382,14384,14386,14388,14390,14392,14394,14396,14398,14400,14402,14404,14406,14408,14410,14412,14414,14416,14418,14420,14422,14424,14426,14428,14430,14432,14434,14436,14438,14440,14442,14444,14446,14448,14450,14452,14454,14456,14458,14460,14462,14464,14466,14468,14470,14472,14474,14476,14478,14480,14482,14484,14486,14488,14490,14492,14494,14496,14498,14500,14502,14504,14506,14508,14510,14512,14514,14516,14518,14520,14522,14524,14526,14528,14530,14532,14534,14536,14538,14540,14542,14544,14546,14548,14550,14552,14554,14556,14558,14560,14562,14564,14566,14568,14570,14572,14574,14576,14578,14580,14582,14584,14586,14588,14590,14592,14594,14596,14598,14600,14602,14604,14606,14608,14610,14612,14614,14616,14618,14620,14622,14624,14626,14628,14630,14632,14634,14636,14638,14640,14642,14644,14646,14648,14650,14652,14654,14656,14658,14660,14662,14664,14666,14668,14670,14672,14674,14676,14678,14680,14682,14684,14686,14688,14690,14692,14694,14696,14698,14700,14702,14704,14706,14708,14710,14712,14714,14716,14718,14720,14722,14724,14726,14728,14730,14732,14734,14736,14738,14740,14742,14744,14746,14748,14750,14752,14754,14756,14758,14760,14762,14764,14766,14768,14770,14772,14774,14776,14778,14780,14782,14784,14786,14788,14790,14792,14794,14796,14798,14800,14802,14804,14806,14808,14810,14812,14814,14816,14818,14820,14822,14824,14826,14828,14830,14832,14834,14836,14838,14840,14842,14844,14846,14848,14850,14852,14854,14856,14858,14860,14862,14864,14866,14868,14870,14872,14874,14876,14878,14880,14882,14884,1
4886,14888,14890,14892,14894,14896,14898,14900,14902,14904,14906,14908,14910,14912,14914,14916,14918,14920,14922,14924,14926,14928,14930,14932,14934,14936,14938,14940,14942,14944,14946,14948,14950,14952,14954,14956,14958,14960,14962,14964,14966,14968,14970,14972,14974,14976,14978,14980,14982,14984,14986,14988,14990,14992,14994,14996,14998,15000,15002,15004,15006,15008,15010,15012,15014,15016,15018,15020,15022,15024,15026,15028,15030,15032,15034,15036,15038,15040,15042,15044,15046,15048,15050,15052,15054,15056,15058,15060,15062,15064,15066,15068,15070,15072,15074,15076,15078,15080,15082,15084,15086,15088,15090,15092,15094,15096,15098,15100,15102,15104,15106,15108,15110,15112,15114,15116,15118,15120,15122,15124,15126,15128,15130,15132,15134,15136,15138,15140,15142,15144,15146,15148,15150,15152,15154,15156,15158,15160,15162,15164,15166,15168,15170,15172,15174,15176,15178,15180,15182,15184,15186,15188,15190,15192,15194,15196,15198,15200,15202,15204,15206,15208,15210,15212,15214,15216,15218,15220,15222,15224,15226,15228,15230,15232,15234,15236,15238,15240,15242,15244,15246,15248,15250,15252,15254,15256,15258,15260,15262,15264,15266,15268,15270,15272,15274,15276,15278,15280,15282,15284,15286,15288,15290,15292,15294,15296,15298,15300,15302,15304,15306,15308,15310,15312,15314,15316,15318,15320,15322,15324,15326,15328,15330,15332,15334,15336,15338,15340,15342,15344,15346,15348,15350,15352,15354,15356,15358,15360,15362,15364,15366,15368,15370,15372,15374,15376,15378,15380,15382,15384,15386,15388,15390,15392,15394,15396,15398,15400,15402,15404,15406,15408,15410,15412,15414,15416,15418,15420,15422,15424,15426,15428,15430,15432,15434,15436,15438,15440,15442,15444,15446,15448,15450,15452,15454,15456,15458,15460,15462,15464,15466,15468,15470,15472,15474,15476,15478,15480,15482,15484,15486,15488,15490,15492,15494,15496,15498,15500,15502,15504,15506,15508,15510,15512,15514,15516,15518,15520,15522,15524,15526,15528,15530,15532,15534,15536,15538,15540,15542,15544,15546,15548,15550,15552,15554,15556,15558,15560,15562,15564,15566,15568,15570,15572,15574,15576,15578,15580,15582,15584,15586,15588,15590,15592,15594,15596,15598,15600,15602,15604,15606,15608,15610,15612,15614,15616,15618,15620,15622,15624,15626,15628,15630,15632,15634,15636,15638,15640,15642,15644,15646,15648,15650,15652,15654,15656,15658,15660,15662,15664,15666,15668,15670,15672,15674,15676,15678,15680,15682,15684,15686,15688,15690,15692,15694,15696,15698,15700,15702,15704,15706,15708,15710,15712,15714,15716,15718,15720,15722,15724,15726,15728,15730,15732,15734,15736,15738,15740,15742,15744,15746,15748,15750,15752,15754,15756,15758,15760,15762,15764,15766,15768,15770,15772,15774,15776,15778,15780,15782,15784,15786,15788,15790,15792,15794,15796,15798,15800,15802,15804,15806,15808,15810,15812,15814,15816,15818,15820,15822,15824,15826,15828,15830,15832,15834,15836,15838,15840,15842,15844,15846,15848,15850,15852,15854,15856,15858,15860,15862,15864,15866,15868,15870,15872,15874,15876,15878,15880,15882,15884,15886,15888,15890,15892,15894,15896,15898,15900,15902,15904,15906,15908,15910,15912,15914,15916,15918,15920,15922,15924,15926,15928,15930,15932,15934,15936,15938,15940,15942,15944,15946,15948,15950,15952,15954,15956,15958,15960,15962,15964,15966,15968,15970,15972,15974,15976,15978,15980,15982,15984,15986,15988,15990,15992,15994,15996,15998,16000,16002,16004,16006,16008,16010,16012,16014,16016,16018,16020,1,16024,16026,16028,16030,16032,16034,16036,16038,16040,16042,16044,16046,16048,16050,16052,16054,16056,16058,16060,16062,16064,16066,16068,16070,16
072,16074,16076,16078,16080,16082,16084,16086,16088,16090,16092,16094,16096,16098,16100,16102,16104,16106,16108,16110,16112,16114,16116,16118,16120,16122,16124,16126,16128,16130,16132,16134,16136,16138,16140,16142,16144,16146,16148,16150,16152,16154,16156,16158,16160,16162,16164,16166,16168,16170,16172,16174,16176,16178,16180,16182,16184,16186,16188,16190,16192,16194,16196,16198,16200,16202,16204,16206,16208,16210,16212,16214,16216,16218,16220,16222,16224,16226,16228,16230,16232,16234,16236,16238,16240,16242,16244,16246,16248,16250,16252,16254,16256,16258,16260,16262,16264,16266,16268,16270,16272,16274,16276,16278,16280,16282,16284,16286,16288,16290,16292,16294,16296,16298,16300,16302,16304,16306,16308,16310,16312,16314,16316,16318,16320,16322,16324,16326,16328,16330,16332,16334,16336,16338,16340,16342,16344,16346,16348,16350,16352,16354,16356,16358,16360,16362,16364,16366,16368,16370,16372,16374,16376,16378,16380,16382,16384,16386,16388,16390,16392,16394,16396,16398,16400,16402,16404,16406,16408,16410,16412,16414,16416,16418,16420,16422,16424,16426,16428,16430,16432,16434,16436,16438,16440,16442,16444,16446,16448,16450,16452,16454,16456,16458,16460,16462,16464,16466,16468,16470,16472,16474,16476,16478,16480,16482,16484,16486,16488,16490,16492,16494,16496,16498,16500,16502,16504,16506,16508,16510,16512,16514,16516,16518,16520,16522,16524,16526,16528,16530,16532,16534,16536,16538,16540,16542,16544,16546,16548,16550,16552,16554,16556,16558,16560,16562,16564,16566,16568,16570,16572,16574,16576,16578,16580,16582,16584,16586,16588,16590,16592,16594,16596,16598,16600,16602,16604,16606,16608,16610,16612,16614,16616,16618,16620,16622,16624,16626,16628,16630,16632,16634,16636,16638,16640,16642,16644,16646,16648,16650,16652,16654,16656,16658,16660,16662,16664,16666,16668,16670,16672,16674,16676,16678,16680,16682,16684,16686,16688,16690,16692,16694,16696,16698,16700,16702,16704,16706,16708,16710,16712,16714,16716,16718,16720,16722,16724,16726,16728,16730,16732,16734,16736,16738,16740,16742,16744,16746,16748,16750,16752,16754,16756,16758,16760,16762,16764,16766,16768,16770,16772,16774,16776,16778,16780,16782,16784,16786,16788,16790,16792,16794,16796,16798,16800,16802,16804,16806,16808,16810,16812,16814,16816,16818,16820,16822,16824,16826,16828,16830,16832,16834,16836,16838,16840,16842,16844,16846,16848,16850,16852,16854,16856,16858,16860,16862,16864,16866,16868,16870,16872,16874,16876,16878,16880,16882,16884,16886,16888,16890,16892,16894,16896,16898,16900,16902,16904,16906,16908,16910,16912,16914,16916,16918,16920,16922,16924,16926,16928,16930,16932,16934,16936,16938,16940,16942,16944,16946,16948,16950,16952,16954,16956,16958,16960,16962,16964,16966,16968,16970,16972,16974,16976,16978,16980,16982,16984,16986,16988,16990,16992,16994,16996,16998,17000,17002,17004,17006,17008,17010,17012,17014,17016,17018,17020,17022,17024,17026,17028,17030,17032,17034,17036,17038,17040,17042,17044,17046,17048,17050,17052,17054,17056,17058,17060,17062,17064,17066,17068,17070,17072,17074,17076,17078,17080,17082,17084,17086,17088,17090,17092,17094,17096,17098,17100,17102,17104,17106,17108,17110,17112,17114,17116,17118,17120,17122,17124,17126,17128,17130,17132,17134,17136,17138,17140,17142,17144,17146,17148,17150,17152,17154,17156,17158,17160,17162,17164,17166,17168,17170,17172,17174,17176,17178,17180,17182,17184,17186,17188,17190,17192,17194,17196,17198,17200,17202,17204,17206,17208,17210,17212,17214,17216,17218,17220,17222,17224,17226,17228,17230,17232,17234,17236,17238,17240,17242,17244,17246,17248,17250,17252,17254,17256
,17258,17260,17262,17264,17266,17268,17270,17272,17274,17276,17278,17280,17282,17284,17286,17288,17290,17292,17294,17296,17298,17300,17302,17304,17306,17308,17310,17312,17314,17316,17318,17320,17322,17324,17326,17328,17330,17332,17334,17336,17338,17340,17342,17344,17346,17348,17350,17352,17354,17356,17358,17360,17362,17364,17366,17368,17370,17372,17374,17376,17378,17380,17382,17384,17386,17388,17390,17392,17394,17396,17398,17400,17402,17404,17406,17408,17410,17412,17414,17416,17418,17420,17422,17424,17426,17428,17430,17432,17434,17436,17438,17440,17442,17444,17446,17448,17450,17452,17454,17456,17458,17460,17462,17464,17466,17468,17470,17472,17474,17476,17478,17480,17482,17484,17486,17488,17490,17492,17494,17496,17498,17500,17502,17504,17506,17508,17510,17512,17514,17516,17518,17520,17522,17524,17526,17528,17530,17532,17534,17536,17538,17540,17542,17544,17546,17548,17550,17552,17554,17556,17558,17560,17562,17564,17566,17568,17570,17572,17574,17576,17578,17580,17582,17584,17586,17588,17590,17592,17594,17596,17598,17600,17602,17604,17606,17608,17610,17612,17614,17616,17618,17620,17622,17624,17626,17628,17630,17632,17634,17636,17638,17640,17642,17644,17646,17648,17650,17652,17654,17656,17658,17660,17662,17664,17666,17668,17670,17672,17674,17676,17678,17680,17682,17684,17686,17688,17690,17692,17694,17696,17698,17700,17702,17704,17706,17708,17710,17712,17714,17716,17718,17720,17722,17724,17726,17728,17730,17732,17734,17736,17738,17740,17742,17744,17746,17748,17750,17752,17754,17756,17758,17760,17762,17764,17766,17768,17770,17772,17774,17776,17778,17780,17782,17784,17786,17788,17790,17792,17794,17796,17798,17800,17802,17804,17806,17808,17810,17812,17814,17816,17818,17820,17822,17824,17826,17828,17830,17832,17834,17836,17838,17840,17842,17844,17846,17848,17850,17852,17854,17856,17858,17860,17862,17864,17866,17868,17870,17872,17874,17876,17878,17880,17882,17884,17886,17888,17890,17892,17894,17896,17898,17900,17902,17904,17906,17908,17910,17912,17914,17916,17918,17920,17922,17924,17926,17928,17930,17932,17934,17936,17938,17940,17942,17944,17946,17948,17950,17952,17954,17956,17958,17960,17962,17964,17966,17968,17970,17972,17974,17976,17978,17980,17982,17984,17986,17988,17990,17992,17994,17996,17998,18000,18002,18004,18006,18008,18010,18012,18014,18016,18018,18020,18022,18024,18026,18028,18030,18032,18034,18036,18038,18040,18042,18044,18046,18048,18050,18052,18054,18056,18058,18060,18062,18064,18066,18068,18070,18072,18074,18076,18078,18080,18082,18084,18086,18088,18090,18092,18094,18096,18098,18100,18102,18104,18106,18108,18110,18112,18114,18116,18118,18120,18122,18124,18126,18128,18130,18132,18134,18136,18138,18140,18142,18144,18146,18148,18150,18152,18154,18156,18158,18160,18162,18164,18166,18168,18170,18172,18174,18176,18178,18180,18182,18184,18186,18188,18190,18192,18194,18196,18198,18200,18202,18204,18206,18208,18210,18212,18214,18216,18218,18220,18222,18224,18226,18228,18230,18232,18234,18236,18238,18240,18242,18244,18246,18248,18250,18252,18254,18256,18258,18260,18262,18264,18266,18268,18270,18272,18274,18276,18278,18280,18282,18284,18286,18288,18290,18292,18294,18296,18298,18300,18302,18304,18306,18308,18310,18312,18314,18316,18318,18320,18322,18324,18326,18328,18330,18332,18334,18336,18338,18340,18342,18344,18346,18348,18350,18352,18354,18356,18358,18360,18362,18364,18366,18368,18370,18372,18374,18376,18378,18380,18382,18384,18386,18388,18390,18392,18394,18396,18398,18400,18402,18404,18406,18408,18410,18412,18414,18416,18418,18420,18422,18424,18426,18428,18430,18432,18434,18436,18438,18440,18
442,18444,18446,18448,18450,18452,18454,18456,18458,18460,18462,18464,18466,18468,18470,18472,18474,18476,18478,18480,18482,18484,18486,18488,18490,18492,18494,18496,18498,18500,18502,18504,18506,18508,18510,18512,18514,18516,18518,18520,18522,18524,18526,18528,18530,18532,18534,18536,18538,18540,18542,18544,18546,18548,18550,18552,18554,18556,18558,18560,18562,18564,18566,18568,18570,18572,18574,18576,18578,18580,18582,18584,18586,18588,18590,18592,18594,18596,18598,18600,18602,18604,18606,18608,18610,18612,18614,18616,18618,18620,18622,18624,18626,18628,18630,18632,18634,18636,18638,18640,18642,18644,18646,18648,18650,18652,18654,18656,18658,18660,18662,18664,18666,18668,18670,18672,18674,18676,18678,18680,18682,18684,18686,18688,18690,18692,18694,18696,18698,18700,18702,18704,18706,18708,18710,18712,18714,18716,18718,18720,18722,18724,18726,18728,18730,18732,18734,18736,18738,18740,18742,18744,18746,18748,18750,18752,18754,18756,18758,18760,18762,18764,18766,18768,18770,18772,18774,18776,18778,18780,18782,18784,18786,18788,18790,18792,18794,18796,18798,18800,18802,18804,18806,18808,18810,18812,18814,18816,18818,18820,18822,18824,18826,18828,18830,18832,18834,18836,18838,18840,18842,18844,18846,18848,18850,18852,18854,18856,18858,18860,18862,18864,18866,18868,18870,18872,18874,18876,18878,18880,18882,18884,18886,18888,18890,18892,18894,18896,18898,18900,18902,18904,18906,18908,18910,18912,18914,18916,18918,18920,18922,18924,18926,18928,18930,18932,18934,18936,18938,18940,18942,18944,18946,18948,18950,18952,18954,18956,18958,18960,18962,18964,18966,18968,18970,18972,18974,18976,18978,18980,18982,18984,18986,18988,18990,18992,18994,18996,18998,19000,19002,19004,19006,19008,19010,19012,19014,19016,19018,19020,19022,19024,19026,19028,19030,19032,19034,19036,19038,19040,19042,19044,19046,19048,19050,19052,19054,19056,19058,19060,19062,19064,19066,19068,19070,19072,19074,19076,19078,19080,19082,19084,19086,19088,19090,19092,19094,19096,19098,19100,19102,19104,19106,19108,19110,19112,19114,19116,19118,19120,19122,19124,19126,19128,19130,19132,19134,19136,19138,19140,19142,19144,19146,19148,19150,19152,19154,19156,19158,19160,19162,19164,19166,19168,19170,19172,19174,19176,19178,19180,19182,19184,19186,19188,19190,19192,19194,19196,19198,19200,19202,19204,19206,19208,19210,19212,19214,19216,19218,19220,19222,19224,19226,19228,19230,19232,19234,19236,19238,19240,19242,19244,19246,19248,19250,19252,19254,19256,19258,19260,19262,19264,19266,19268,19270,19272,19274,19276,19278,19280,19282,19284,19286,19288,19290,19292,19294,19296,19298,19300,19302,19304,19306,19308,19310,19312,19314,19316,19318,19320,19322,19324,19326,19328,19330,19332,19334,19336,19338,19340,19342,19344,19346,19348,19350,19352,19354,19356,19358,19360,19362,19364,19366,19368,19370,19372,19374,19376,19378,19380,19382,19384,19386,19388,19390,19392,19394,19396,19398,19400,19402,19404,19406,19408,19410,19412,19414,19416,19418,19420,19422,19424,19426,19428,19430,19432,19434,19436,19438,19440,19442,19444,19446,19448,19450,19452,19454,19456,19458,19460,19462,19464,19466,19468,19470,19472,19474,19476,19478,19480,19482,19484,19486,19488,19490,19492,19494,19496,19498,19500,19502,19504,19506,19508,19510,19512,19514,19516,19518,19520,19522,19524,19526,19528,19530,19532,19534,19536,19538,19540,19542,19544,19546,19548,19550,19552,19554,19556,19558,19560,19562,19564,19566,19568,19570,19572,19574,19576,19578,19580,19582,19584,19586,19588,19590,19592,19594,19596,19598,19600,19602,19604,19606,19608,19610,19612,19614,19616,19618,19620,19622,19624,19626
,19628,19630,19632,19634,19636,19638,19640,19642,19644,19646,19648,19650,19652,19654,19656,19658,19660,19662,19664,19666,19668,19670,19672,19674,19676,19678,19680,19682,19684,19686,19688,19690,19692,19694,19696,19698,19700,19702,19704,19706,19708,19710,19712,19714,19716,19718,19720,19722,19724,19726,19728,19730,19732,19734,19736,19738,19740,19742,19744,19746,19748,19750,19752,19754,19756,19758,19760,19762,19764,19766,19768,19770,19772,19774,19776,19778,19780,19782,19784,19786,19788,19790,19792,19794,19796,19798,19800,19802,19804,19806,19808,19810,19812,19814,19816,19818,19820,19822,19824,19826,19828,19830,19832,19834,19836,19838,19840,19842,19844,19846,19848,19850,19852,19854,19856,19858,19860,19862,19864,19866,19868,19870,19872,19874,19876,19878,19880,19882,19884,19886,19888,19890,19892,19894,19896,19898,19900,19902,19904,19906,19908,19910,19912,19914,19916,19918,19920,19922,19924,19926,19928,19930,19932,19934,19936,19938,19940,19942,19944,19946,19948,19950,19952,19954,19956,19958,19960,19962,19964,19966,19968,19970,19972,19974,19976,19978,19980,19982,19984,19986,19988,19990,19992,19994,19996,19998,20000,20002,20004,20006,20008,20010,20012,20014,20016,20018,20020,20022,20024,20026,20028,20030,20032,20034,20036,20038,20040,20042,20044,20046,20048,20050,20052,20054,20056,20058,20060,20062,20064,20066,20068,20070,20072,20074,20076,20078,20080,20082,20084,20086,20088,20090,20092,20094,20096,20098,20100,20102,20104,20106,20108,20110,20112,20114,20116,20118,20120,20122,20124,20126,20128,20130,20132,20134,20136,20138,20140,20142,20144,20146,20148,20150,20152,20154,20156,20158,20160,20162,20164,20166,20168,20170,20172,20174,20176,20178,20180,20182,20184,20186,20188,20190,20192,20194,20196,20198,20200,20202,20204,20206,20208,20210,20212,20214,20216,20218,20220,20222,20224,20226,20228,20230,20232,20234,20236,20238,20240,20242,20244,20246,20248,20250,20252,20254,20256,20258,20260,20262,20264,20266,20268,20270,20272,20274,20276,20278,20280,20282,20284,20286,20288,20290,20292,20294,20296,20298,20300,20302,20304,20306,20308,20310,20312,20314,20316,20318,20320,20322,20324,20326,20328,20330,20332,20334,20336,20338,20340,20342,20344,20346,20348,20350,20352,20354,20356,20358,20360,20362,20364,20366,20368,20370,20372,20374,20376,20378,20380,20382,20384,20386,20388,20390,20392,20394,20396,20398,20400,20402,20404,20406,20408,20410,20412,20414,20416,20418,20420,20422,20424,20426,20428,20430,20432,20434,20436,20438,20440,20442,20444,20446,20448,20450,20452,20454,20456,20458,20460,20462,20464,20466,20468,20470,20472,20474,20476,20478,20480,20482,20484,20486,20488,20490,20492,20494,20496,20498,20500,20502,20504,20506,20508,20510,20512,20514,20516,20518,20520,20522,20524,20526,20528,20530,20532,20534,20536,20538,20540,20542,20544,20546,20548,20550,20552,20554,20556,20558,20560,20562,20564,20566,20568,20570,20572,20574,20576,20578,20580,20582,20584,20586,20588,20590,20592,20594,20596,20598,20600,20602,20604,20606,20608,20610,20612,20614,20616,20618,20620,20622,20624,20626,20628,20630,20632,20634,20636,20638,20640,20642,20644,20646,20648,20650,20652,20654,20656,20658,20660,20662,20664,20666,20668,20670,20672,20674,20676,20678,20680,20682,20684,20686,20688,20690,20692,20694,20696,20698,20700,20702,20704,20706,20708,20710,20712,20714,20716,20718,20720,20722,20724,20726,20728,20730,20732,20734,20736,20738,20740,20742,20744,20746,20748,20750,20752,20754,20756,20758,20760,20762,20764,20766,20768,20770,20772,20774,20776,20778,20780,20782,20784,20786,20788,20790,20792,20794,20796,20798,20800,20802,20804,20806,20808,20810,20
812,20814,20816,20818,20820,20822,20824,20826,20828,20830,20832,20834,20836,20838,20840,20842,20844,20846,20848,20850,20852,20854,20856,20858,20860,20862,20864,20866,20868,20870,20872,20874,20876,20878,20880,20882,20884,20886,20888,20890,20892,20894,20896,20898,20900,20902,20904,20906,20908,20910,20912,20914,20916,20918,20920,20922,20924,20926,20928,20930,20932,20934,20936,20938,20940,20942,20944,20946,20948,20950,20952,20954,20956,20958,20960,20962,20964,20966,20968,20970,20972,20974,20976,20978,20980,20982,20984,20986,20988,20990,20992,20994,20996,20998,21000,21002,21004,21006,21008,21010,21012,21014,21016,21018,21020,21022,21024,21026,21028,21030,21032,21034,21036,21038,21040,21042,21044,21046,21048,21050,21052,21054,21056,21058,21060,21062,21064,21066,21068,21070,21072,21074,21076,21078,21080,21082,21084,21086,21088,21090,21092,21094,21096,21098,21100,21102,21104,21106,21108,21110,21112,21114,21116,21118,21120,21122,21124,21126,21128,21130,21132,21134,21136,21138,21140,21142,21144,21146,21148,21150,21152,21154,21156,21158,21160,21162,21164,21166,21168,21170,21172,21174,21176,21178,21180,21182,21184,21186,21188,21190,21192,21194,21196,21198,21200,21202,21204,21206,21208,21210,21212,21214,21216,21218,21220,21222,21224,21226,21228,21230,21232,21234,21236,21238,21240,21242,21244,21246,21248,21250,21252,21254,21256,21258,21260,21262,21264,21266,21268,21270,21272,21274,21276,21278,21280,21282,21284,21286,21288,21290,21292,21294,21296,21298,21300,21302,21304,21306,21308,21310,21312,21314,21316,21318,21320,21322,21324,21326,21328,21330,21332,21334,21336,21338,21340,21342,21344,21346,21348,21350,21352,21354,21356,21358,21360,21362,21364,21366,21368,21370,21372,21374,21376,21378,21380,21382,21384,21386,21388,21390,21392,21394,21396,21398,21400,21402,21404,21406,21408,21410,21412,21414,21416,21418,21420,21422,21424,21426,21428,21430,21432,21434,21436,21438,21440,21442,21444,21446,21448,21450,21452,21454,21456,21458,21460,21462,21464,21466,21468,21470,21472,21474,21476,21478,21480,21482,21484,21486,21488,21490,21492,21494,21496,21498,21500,21502,21504,21506,21508,21510,21512,21514,21516,21518,21520,21522,21524,21526,21528,21530,21532,21534,21536,21538,21540,21542,21544,21546,21548,21550,21552,21554,21556,21558,21560,21562,21564,21566,21568,21570,21572,21574,21576,21578,21580,21582,21584,21586,21588,21590,21592,21594,21596,21598,21600,21602,21604,21606,21608,21610,21612,21614,21616,21618,21620,21622,21624,21626,21628,21630,21632,21634,21636,21638,21640,21642,21644,21646,21648,21650,21652,21654,21656,21658,21660,21662,21664,21666,21668,21670,21672,21674,21676,21678,21680,21682,21684,21686,21688,21690,21692,21694,21696,21698,21700,21702,21704,21706,21708,21710,21712,21714,21716,21718,21720,21722,21724,21726,21728,21730,21732,21734,21736,21738,21740,21742,21744,21746,21748,21750,21752,21754,21756,21758,21760,21762,21764,21766,21768,21770,21772,21774,21776,21778,21780,21782,21784,21786,21788,21790,21792,21794,21796,21798,21800,21802,21804,21806,21808,21810,21812,21814,21816,21818,21820,21822,21824,21826,21828,21830,21832,21834,21836,21838,21840,21842,21844,21846,21848,21850,21852,21854,21856,21858,21860,21862,21864,21866,21868,21870,21872,21874,21876,21878,21880,21882,21884,21886,21888,21890,21892,21894,21896,21898,21900,21902,21904,21906,21908,21910,21912,21914,21916,21918,21920,21922,21924,21926,21928,21930,21932,21934,21936,21938,21940,21942,21944,21946,21948,21950,21952,21954,21956,21958,21960,21962,21964,21966,21968,21970,21972,21974,21976,21978,21980,21982,21984,21986,21988,21990,21992,21994,21996
,21998,22000,22002,22004,22006,22008,22010,22012,22014,22016,22018,22020,22022,22024,22026,22028,22030,22032,22034,22036,22038,22040,22042,22044,22046,22048,22050,22052,22054,22056,22058,22060,22062,22064,22066,22068,22070,22072,22074,22076,22078,22080,22082,22084,22086,22088,22090,22092,22094,22096,22098,22100,22102,22104,22106,22108,22110,22112,22114,22116,22118,22120,22122,22124,22126,22128,22130,22132,22134,22136,22138,22140,22142,22144,22146,22148,22150,22152,22154,22156,22158,22160,22162,22164,22166,22168,22170,22172,22174,22176,22178,22180,22182,22184,22186,22188,22190,22192,22194,22196,22198,22200,22202,22204,22206,22208,22210,22212,22214,22216,22218,22220,22222,22224,22226,22228,22230,22232,22234,22236,22238,22240,22242,22244,22246,22248,22250,22252,22254,22256,22258,22260,22262,22264,22266,22268,22270,22272,22274,22276,22278,22280,22282,22284,22286,22288,22290,22292,22294,22296,22298,22300,22302,22304,22306,22308,22310,22312,22314,22316,22318,22320,22322,22324,22326,22328,22330,22332,22334,22336,22338,22340,22342,22344,22346,22348,22350,22352,22354,22356,22358,22360,22362,22364,22366,22368,22370,22372,22374,22376,22378,22380,22382,22384,22386,22388,22390,22392,22394,22396,22398,22400,22402,22404,22406,22408,22410,22412,22414,22416,22418,22420,22422,22424,22426,22428,22430,22432,22434,22436,22438,22440,22442,22444,22446,22448,22450,22452,22454,22456,22458,22460,22462,22464,22466,22468,22470,22472,22474,22476,22478,22480,22482,22484,22486,22488,22490,22492,22494,22496,22498,22500,22502,22504,22506,22508,22510,22512,22514,22516,22518,22520,22522,22524,22526,22528,22530,22532,22534,22536,22538,22540,22542,22544,22546,22548,22550,22552,22554,22556,22558,22560,22562,22564,22566,22568,22570,22572,22574,22576,22578,22580,22582,22584,22586,22588,22590,22592,22594,22596,22598,22600,22602,22604,22606,22608,22610,22612,22614,22616,22618,22620,22622,22624,22626,22628,22630,22632,22634,22636,22638,22640,22642,22644,22646,22648,22650,22652,22654,22656,22658,22660,22662,22664,22666,22668,22670,22672,22674,22676,22678,22680,22682,22684,22686,22688,22690,22692,22694,22696,22698,22700,22702,22704,22706,22708,22710,22712,22714,22716,22718,22720,22722,22724,22726,22728,22730,22732,22734,22736,22738,22740,22742,22744,22746,22748,22750,22752,22754,22756,22758,22760,22762,22764,22766,22768,22770,22772,22774,22776,22778,22780,22782,22784,22786,22788,22790,22792,22794,22796,22798,22800,22802,22804,22806,22808,22810,22812,22814,22816,22818,22820,22822,22824,22826,22828,22830,22832,22834,22836,22838,22840,22842,22844,22846,22848,22850,22852,22854,22856,22858,22860,22862,22864,22866,22868,22870,22872,22874,22876,22878,22880,22882,22884,22886,22888,22890,22892,22894,22896,22898,22900,22902,22904,22906,22908,22910,22912,22914,22916,22918,22920,22922,22924,22926,22928,22930,22932,22934,22936,22938,22940,22942,22944,22946,22948,22950,22952,22954,22956,22958,22960,22962,22964,22966,22968,22970,22972,22974,22976,22978,22980,22982,22984,22986,22988,22990,22992,22994,22996,22998,23000,23002,23004,23006,23008,23010,23012,23014,23016,23018,23020,23022,23024,23026,23028,23030,23032,23034,23036,23038,23040,23042,23044,23046,23048,23050,23052,23054,23056,23058,23060,23062,23064,23066,23068,23070,23072,23074,23076,23078,23080,23082,23084,23086,23088,23090,23092,23094,23096,23098,23100,23102,23104,23106,23108,23110,23112,23114,23116,23118,23120,23122,23124,23126,23128,23130,23132,23134,23136,23138,23140,23142,23144,23146,23148,23150,23152,23154,23156,23158,23160,23162,23164,23166,23168,23170,23172,23174,23176,23178,23180,23
182,23184,23186,23188,23190,23192,23194,23196,23198,23200,23202,23204,23206,23208,23210,23212,23214,23216,23218,23220,23222,23224,23226,23228,23230,23232,23234,23236,23238,23240,23242,23244,23246,23248,23250,23252,23254,23256,23258,23260,23262,23264,23266,23268,23270,23272,23274,23276,23278,23280,23282,23284,23286,23288,23290,23292,23294,23296,23298,23300,23302,23304,23306,23308,23310,23312,23314,23316,23318,23320,23322,23324,23326,23328,23330,23332,23334,23336,23338,23340,23342,23344,23346,23348,23350,23352,23354,23356,23358,23360,23362,23364,23366,23368,23370,23372,23374,23376,23378,23380,23382,23384,23386,23388,23390,23392,23394,23396,23398,23400,23402,23404,23406,23408,23410,23412,23414,23416,23418,23420,23422,23424,23426,23428,23430,23432,23434,23436,23438,23440,23442,23444,23446,23448,23450,23452,23454,23456,23458,23460,23462,23464,23466,23468,23470,23472,23474,23476,23478,23480,23482,23484,23486,23488,23490,23492,23494,23496,23498,23500,23502,23504,23506,23508,23510,23512,23514,23516,23518,23520,23522,23524,23526,23528,23530,23532,23534,23536,23538,23540,23542,23544,23546,23548,23550,23552,23554,23556,23558,23560,23562,23564,23566,23568,23570,23572,23574,23576,23578,23580,23582,23584,23586,23588,23590,23592,23594,23596,23598,23600,23602,23604,23606,23608,23610,23612,23614,23616,23618,23620,23622,23624,23626,23628,23630,23632,23634,23636,23638,23640,23642,23644,23646,23648,23650,23652,23654,23656,23658,23660,23662,23664,23666,23668,23670,23672,23674,23676,23678,23680,23682,23684,23686,23688,23690,23692,23694,23696,23698,23700,23702,23704,23706,23708,23710,23712,23714,23716,23718,23720,23722,23724,23726,23728,23730,23732,23734,23736,23738,23740,23742,23744,23746,23748,23750,23752,23754,23756,23758,23760,23762,23764,23766,23768,23770,23772,23774,23776,23778,23780,23782,23784,23786,23788,23790,23792,23794,23796,23798,23800,23802,23804,23806,23808,23810,23812,23814,23816,23818,23820,23822,23824,23826,23828,23830,23832,23834,23836,23838,23840,23842,23844,23846,23848,23850,23852,23854,23856,23858,23860,23862,23864,23866,23868,23870,23872,23874,23876,23878,23880,23882,23884,23886,23888,23890,23892,23894,23896,23898,23900,23902,23904,23906,23908,23910,23912,23914,23916,23918,23920,23922,23924,23926,23928,23930,23932,23934,23936,23938,23940,23942,23944,23946,23948,23950,23952,23954,23956,23958,23960,23962,23964,23966,23968,23970,23972,23974,23976,23978,23980,23982,23984,23986,23988,23990,23992,23994,23996,23998,24000,24002,24004,24006,24008,24010,24012,24014,24016,24018,24020,24022,24024,24026,24028,24030,24032,24034,24036,24038,24040,24042,24044,24046,24048,24050,24052,24054,24056,24058,24060,24062,24064,24066,24068,24070,24072,24074,24076,24078,24080,24082,24084,24086,24088,24090,24092,24094,24096,24098,24100,24102,24104,24106,24108,24110,24112,24114,24116,24118,24120,24122,24124,24126,24128,24130,24132,24134,24136,24138,24140,24142,24144,24146,24148,24150,24152,24154,24156,24158,24160,24162,24164,24166,24168,24170,24172,24174,24176,24178,24180,24182,24184,24186,24188,24190,24192,24194,24196,24198,24200,24202,24204,24206,24208,24210,24212,24214,24216,24218,24220,24222,24224,24226,24228,24230,24232,24234,24236,24238,24240,24242,24244,24246,24248,24250,24252,24254,24256,24258,24260,24262,24264,24266,24268,24270,24272,24274,24276,24278,24280,24282,24284,24286,24288,24290,24292,24294,24296,24298,24300,24302,24304,24306,24308,24310,24312,24314,24316,24318,24320,24322,24324,24326,24328,24330,24332,24334,24336,24338,24340,24342,24344,24346,24348,24350,24352,24354,24356,24358,24360,24362,24364,24366
,24368,24370,24372,24374,24376,24378,24380,24382,24384,24386,24388,24390,24392,24394,24396,24398,24400,24402,24404,24406,24408,24410,24412,24414,24416,24418,24420,24422,24424,24426,24428,24430,24432,24434,24436,24438,24440,24442,24444,24446,24448,24450,24452,24454,24456,24458,24460,24462,24464,24466,24468,24470,24472,24474,24476,24478,24480,24482,24484,24486,24488,24490,24492,24494,24496,24498,24500,24502,24504,24506,24508,24510,24512,24514,24516,24518,24520,24522,24524,24526,24528,24530,24532,24534,24536,24538,24540,24542,24544,24546,24548,24550,24552,24554,24556,24558,24560,24562,24564,24566,24568,24570,24572,24574,24576,24578,24580,24582,24584,24586,24588,24590,24592,24594,24596,24598,24600,24602,24604,24606,24608,24610,24612,24614,24616,24618,24620,24622,24624,24626,24628,24630,24632,24634,24636,24638,24640,24642,24644,24646,24648,24650,24652,24654,24656,24658,24660,24662,24664,24666,24668,24670,24672,24674,24676,24678,24680,24682,24684,24686,24688,24690,24692,24694,24696,24698,24700,24702,24704,24706,24708,24710,24712,24714,24716,24718,24720,24722,24724,24726,24728,24730,24732,24734,24736,24738,24740,24742,24744,24746,24748,24750,24752,24754,24756,24758,24760,24762,24764,24766,24768,24770,24772,24774,24776,24778,24780,24782,24784,24786,24788,24790,24792,24794,24796,24798,24800,24802,24804,24806,24808,24810,24812,24814,24816,24818,24820,24822,24824,24826,24828,24830,24832,24834,24836,24838,24840,24842,24844,24846,24848,24850,24852,24854,24856,24858,24860,24862,24864,24866,24868,24870,24872,24874,24876,24878,24880,24882,24884,24886,24888,24890,24892,24894,24896,24898,24900,24902,24904,24906,24908,24910,24912,24914,24916,24918,24920,24922,24924,24926,24928,24930,24932,24934,24936,24938,24940,24942,24944,24946,24948,24950,24952,24954,24956,24958,24960,24962,24964,24966,24968,24970,24972,24974,24976,24978,24980,24982,24984,24986,24988,24990,24992,24994,24996,24998,25000,25002,25004,25006,25008,25010,25012,25014,25016,25018,25020,25022,25024,25026,25028,25030,25032,25034,25036,25038,25040,25042,25044,25046,25048,25050,25052,25054,25056,25058,25060,25062,25064,25066,25068,25070,25072,25074,25076,25078,25080,25082,25084,25086,25088,25090,25092,25094,25096,25098,25100,25102,25104,25106,25108,25110,25112,25114,25116,25118,25120,25122,25124,25126,25128,25130,25132,25134,25136,25138,25140,25142,25144,25146,25148,25150,25152,25154,25156,25158,25160,25162,25164,25166,25168,25170,25172,25174,25176,25178,25180,25182,25184,25186,25188,25190,25192,25194,25196]
,16021
)
assert result3 == [8010, 8011]
print("OK")
| true
| true
|
7906ef96b10419d4a73523d05c620c325fc37ac4
| 2,584
|
py
|
Python
|
components/Actuators/HighLevel/feederMap.py
|
Raptacon/Robot-2022
|
f59c6a6ebd5779a2fd91181b65cbcd677507ca5d
|
[
"MIT"
] | 4
|
2022-01-31T14:05:31.000Z
|
2022-03-26T14:12:45.000Z
|
components/Actuators/HighLevel/feederMap.py
|
Raptacon/Robot-2022
|
f59c6a6ebd5779a2fd91181b65cbcd677507ca5d
|
[
"MIT"
] | 57
|
2022-01-13T02:41:31.000Z
|
2022-03-26T14:50:42.000Z
|
components/Actuators/HighLevel/feederMap.py
|
Raptacon/Robot-2022
|
f59c6a6ebd5779a2fd91181b65cbcd677507ca5d
|
[
"MIT"
] | null | null | null |
from robotMap import XboxMap
from components.Actuators.LowLevel.shooterMotors import ShooterMotors
from components.Actuators.LowLevel.intakeMotor import IntakeMotor
from components.Actuators.HighLevel.hopperMotor import HopperMotor
from utils.DirectionEnums import Direction
from enum import Enum, auto
from magicbot import tunable
import logging as log
class Type(Enum):
"""Enumeration for the two types within the feeder."""
kIntake = auto()
kHopper = auto()
class FeederMap:
"""Simple map that holds the logic for running elements of the feeder."""
compatString = ["doof", "teapot"]
shooterMotors: ShooterMotors
intakeMotor: IntakeMotor
hopperMotor: HopperMotor
xboxMap: XboxMap
loaderMotorSpeed = tunable(.2)
intakeMotorSpeed = tunable(.5)
def on_enable(self):
pass
        # log.setLevel(log.DEBUG)
def run(self, loaderFunc):
"""Called when execution of a feeder element is desired."""
if loaderFunc == Type.kIntake:
if self.xboxMap.getDriveLeftTrig() > 0 and self.xboxMap.getDriveRightTrig() == 0:
self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kForwards)
log.debug("right trig intake", self.xboxMap.getMechRightTrig())
elif self.xboxMap.getDriveRightTrig() > 0 and self.xboxMap.getDriveLeftTrig() == 0:
self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kBackwards)
log.debug("left trig intake", self.xboxMap.getMechLeftTrig())
else:
self.intakeMotor.runIntake(0, Direction.kForwards)
if loaderFunc == Type.kHopper:
if self.xboxMap.getDriveLeftTrig() > 0 and self.xboxMap.getDriveRightTrig() == 0:
self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kForwards)
self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kForwards)
log.debug("right trig manual", self.xboxMap.getMechRightTrig())
elif self.xboxMap.getDriveRightTrig() > 0 and self.xboxMap.getDriveLeftTrig() == 0:
self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kBackwards)
self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kBackwards)
log.debug("left trig manual", self.xboxMap.getMechLeftTrig())
else:
self.hopperMotor.stopHopperMotorBackside()
self.hopperMotor.stopHopperMotorForeside()
def execute(self):
pass
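A minimal, self-contained sketch of the left/right-trigger decision that run() applies for Type.kIntake, with the Xbox map reduced to two plain trigger readings. The helper name and the local Direction enum are illustrative stand-ins; the real wiring shown above (magicbot injection, tunables, utils.DirectionEnums) is omitted.

from enum import Enum, auto

class Direction(Enum):
    # stand-in for utils.DirectionEnums.Direction
    kForwards = auto()
    kBackwards = auto()

def intake_command(left_trig, right_trig, speed=0.5):
    """Return the (speed, direction) pair the intake would be driven with."""
    if left_trig > 0 and right_trig == 0:
        return speed, Direction.kForwards    # only left trigger held: run intake forwards
    if right_trig > 0 and left_trig == 0:
        return speed, Direction.kBackwards   # only right trigger held: run intake backwards
    return 0, Direction.kForwards            # neither or both held: stop the intake

assert intake_command(0.8, 0.0) == (0.5, Direction.kForwards)
assert intake_command(0.0, 0.6) == (0.5, Direction.kBackwards)
assert intake_command(0.7, 0.7) == (0, Direction.kForwards)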
| 41.015873
| 100
| 0.689241
|
from robotMap import XboxMap
from components.Actuators.LowLevel.shooterMotors import ShooterMotors
from components.Actuators.LowLevel.intakeMotor import IntakeMotor
from components.Actuators.HighLevel.hopperMotor import HopperMotor
from utils.DirectionEnums import Direction
from enum import Enum, auto
from magicbot import tunable
import logging as log
class Type(Enum):
kIntake = auto()
kHopper = auto()
class FeederMap:
compatString = ["doof", "teapot"]
shooterMotors: ShooterMotors
intakeMotor: IntakeMotor
hopperMotor: HopperMotor
xboxMap: XboxMap
loaderMotorSpeed = tunable(.2)
intakeMotorSpeed = tunable(.5)
def on_enable(self):
pass
def run(self, loaderFunc):
if loaderFunc == Type.kIntake:
if self.xboxMap.getDriveLeftTrig() > 0 and self.xboxMap.getDriveRightTrig() == 0:
self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kForwards)
log.debug("right trig intake", self.xboxMap.getMechRightTrig())
elif self.xboxMap.getDriveRightTrig() > 0 and self.xboxMap.getDriveLeftTrig() == 0:
self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kBackwards)
log.debug("left trig intake", self.xboxMap.getMechLeftTrig())
else:
self.intakeMotor.runIntake(0, Direction.kForwards)
if loaderFunc == Type.kHopper:
if self.xboxMap.getDriveLeftTrig() > 0 and self.xboxMap.getDriveRightTrig() == 0:
self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kForwards)
self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kForwards)
log.debug("right trig manual", self.xboxMap.getMechRightTrig())
elif self.xboxMap.getDriveRightTrig() > 0 and self.xboxMap.getDriveLeftTrig() == 0:
self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kBackwards)
self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kBackwards)
log.debug("left trig manual", self.xboxMap.getMechLeftTrig())
else:
self.hopperMotor.stopHopperMotorBackside()
self.hopperMotor.stopHopperMotorForeside()
def execute(self):
pass
| true
| true
|
7906f07907466d943eea03ec2b740451f5109b2c
| 5,968
|
py
|
Python
|
selenium_test/sele_test_mail_login.py
|
ivanlevsky/cowabunga-potato
|
ab317582b7b8f99d7be3ea4f5edbe9829fc398fb
|
[
"MIT"
] | null | null | null |
selenium_test/sele_test_mail_login.py
|
ivanlevsky/cowabunga-potato
|
ab317582b7b8f99d7be3ea4f5edbe9829fc398fb
|
[
"MIT"
] | null | null | null |
selenium_test/sele_test_mail_login.py
|
ivanlevsky/cowabunga-potato
|
ab317582b7b8f99d7be3ea4f5edbe9829fc398fb
|
[
"MIT"
] | null | null | null |
from selenium_test.selenium_utils import *
from file_and_system.windows_os_utils import WindowsOsUtil
from python_common.global_param import GlobalParam
from http_request.request_utils import request_download_file_by_url
import cv2 as cv
import time
WindowsOsUtil.kill_process_by_name('MicrosoftWebDriver.exe')
# mail_lists = ['mail.hoperun.com', 'mail.qq.com', 'mail.163.com']
mail_lists = ['mail.163.com']
mail_driver = init_driver('edge', GlobalParam.get_edge_driver_path())
open_browser_multi_tab(mail_driver, mail_lists)
wait_for_page_full_loaded(mail_driver)
def hoperun_login(hoperun_driver, user_name, user_pass):
hoperun_driver.execute_script("document.getElementById('usernameTip').removeAttribute('readonly');")
element = find_element_by_id(hoperun_driver, 'usernameTip')
element.click()
element = find_element_by_id(hoperun_driver, 'username')
element.send_keys(user_name)
element = find_element_by_id(hoperun_driver, 'userType')
element.click()
element = find_element_by_id(hoperun_driver, 'userTypePwd')
element.send_keys(user_pass)
element = find_element_by_id(hoperun_driver, 'wmSubBtn')
element.click()
def hoperun_check_mail(hoperun_driver, mail_sender, mail_title):
wait_for_frame_and_switch_to_frame(hoperun_driver, 'treeBox')
element = find_element_by_id(hoperun_driver, 'tree_folder_1_span')
element.click()
wait_for_page_full_loaded(hoperun_driver)
wait_for_frame_and_switch_to_frame(hoperun_driver, 'tabsHome')
wait_for_page_full_loaded(hoperun_driver)
element = hoperun_driver.find_elements_by_xpath(''.join(('//div[text()="', mail_sender, '"]/../../../..')))
for e in element:
if e.find_element_by_xpath('li[2]/div[3]/span').text.__contains__(mail_title):
e.find_element_by_xpath('li[2]/div[3]/span').click()
def qq_login(qq_driver, user_name, user_pass):
element = find_element_by_id(qq_driver, 'qqLoginTab')
element.click()
qq_driver.switch_to.frame('login_frame')
element = find_element_by_id(qq_driver, 'u')
element.click()
element.send_keys(user_name)
element = find_element_by_id(qq_driver, 'p')
element.click()
element.send_keys(user_pass)
element = find_element_by_id(qq_driver, 'login_button')
element.click()
wait_for_frame_and_switch_to_frame(qq_driver, 'tcaptcha_iframe')
img_element = find_element_by_id(qq_driver, 'slideBg')
wait_for_element_appeared(qq_driver, img_element)
big = img_element.get_attribute('src')
request_download_file_by_url(big, GlobalParam.get_test_image_path() + 'test_qq_mail_big.png')
img_element = find_element_by_id(qq_driver, 'slideBlock')
wait_for_element_appeared(qq_driver, img_element)
small = img_element.get_attribute('src')
request_download_file_by_url(small, GlobalParam.get_test_image_path() + 'test_qq_mail_small.png')
def netcase_163_login(netcase_163_driver, user_name, user_pass):
netcase_login_frame = netcase_163_driver.find_element_by_tag_name('iframe')
wait_for_frame_and_switch_to_frame(netcase_163_driver, netcase_login_frame)
wait_for_element_exist(netcase_163_driver, '//input[@name="email"]')
element = find_element_by_name(netcase_163_driver, 'email')
element.click()
element.send_keys(user_name)
wait_for_element_exist(netcase_163_driver, '//input[@name="password"]')
element = find_element_by_name(netcase_163_driver, 'password')
element.click()
element.send_keys(user_pass)
element = find_element_by_id(netcase_163_driver, 'dologin')
element.click()
# ------------------------security mail captcha not show----------------------
# wait_for_element_exist(netcase_163_driver,'//div[@class="yidun_panel"]')
# element = find_element_by_class_name(netcase_163_driver, 'yidun_panel')
# netcase_163_driver.execute_script("arguments[0].style['display'] = 'block';",element)
# # element = find_element_by_class_name(netcase_163_driver, 'yidun_bg-img')
# # netcase_mail_captcha = element.get_attribute('src')
# # request_download_file_by_url(netcase_mail_captcha, test_image_path+'test_netcase_mail_captcha.png')
# time.sleep(4)
# element = find_element_by_class_name(netcase_163_driver, 'yidun_refresh')
# element.click()
#
# element = find_element_by_class_name(netcase_163_driver, 'yidun_tips__point')
# print(element.location)
#
# # element = find_element_by_class_name(netcase_163_driver, 'yidun_tips__point')
# # print(element.get_attribute("innerHTML"))
# ------------------------security mail captcha not show----------------------
def netcase_163_check_mail(netcase_163_driver, mail_sender, mail_title):
wait_for_element_to_be_clickable(netcase_163_driver, '//div[@id="_mail_component_140_140"]/span[@title="收件箱"]')
time.sleep(2)
# rF0 kw0 nui-txt-flag0 : not read
    # rF0 nui-txt-flag0 : read
# element = netcase_163_driver.find_elements_by_xpath('//div[@class="rF0 nui-txt-flag0"]/div/div[2]/span')
element = netcase_163_driver.find_elements_by_xpath('//div[@class="rF0 nui-txt-flag0"]')
for e in element:
print(e.find_element_by_xpath('.//div/div[2]/span').text)
# if e.text.__contains__(mail_title):
# print(e.text)
def qq_captcha_pass():
big_image = cv.imread(GlobalParam.get_test_image_path() + 'test_qq_mail_big.png')
small_image = cv.imread(GlobalParam.get_test_image_path() + 'test_qq_mail_small.png')
cv.imshow('1', small_image)
cv.waitKey(0)
def netcase_captcha_pass():
return ''
# login hoperun mail and check mail
# hoperun_login(mail_driver, 'user', 'password')
# wait_for_page_full_loaded(mail_driver)
# hoperun_check_mail(mail_driver, 'sender', 'title')
netcase_163_login(mail_driver, '****', '****')
wait_for_page_full_loaded(mail_driver)
netcase_163_check_mail(mail_driver, '', '123')
# qq_login(mail_driver, '', '')
# netcase_163_login(mail_driver, '', '')
# captcha_pass()
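qq_captcha_pass above only loads and displays the two downloaded slider images. A common way to finish a slider captcha of this kind is to find the gap in the background picture and turn its x position into a slide distance; the sketch below assumes that approach, using OpenCV template matching on edge maps. The helper name and the commented call are illustrative, not part of the original script.

import cv2 as cv

def find_slider_offset(big_path, small_path):
    """Return the x offset where the slider piece best matches the background gap."""
    big = cv.imread(big_path, cv.IMREAD_GRAYSCALE)
    small = cv.imread(small_path, cv.IMREAD_GRAYSCALE)
    # edge maps make the gap outline stand out against the photo background
    big_edges = cv.Canny(big, 100, 200)
    small_edges = cv.Canny(small, 100, 200)
    result = cv.matchTemplate(big_edges, small_edges, cv.TM_CCOEFF_NORMED)
    _, _, _, max_loc = cv.minMaxLoc(result)
    return max_loc[0]

# offset = find_slider_offset(
#     GlobalParam.get_test_image_path() + 'test_qq_mail_big.png',
#     GlobalParam.get_test_image_path() + 'test_qq_mail_small.png')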
| 44.537313
| 115
| 0.744135
|
from selenium_test.selenium_utils import *
from file_and_system.windows_os_utils import WindowsOsUtil
from python_common.global_param import GlobalParam
from http_request.request_utils import request_download_file_by_url
import cv2 as cv
import time
WindowsOsUtil.kill_process_by_name('MicrosoftWebDriver.exe')
mail_lists = ['mail.163.com']
mail_driver = init_driver('edge', GlobalParam.get_edge_driver_path())
open_browser_multi_tab(mail_driver, mail_lists)
wait_for_page_full_loaded(mail_driver)
def hoperun_login(hoperun_driver, user_name, user_pass):
hoperun_driver.execute_script("document.getElementById('usernameTip').removeAttribute('readonly');")
element = find_element_by_id(hoperun_driver, 'usernameTip')
element.click()
element = find_element_by_id(hoperun_driver, 'username')
element.send_keys(user_name)
element = find_element_by_id(hoperun_driver, 'userType')
element.click()
element = find_element_by_id(hoperun_driver, 'userTypePwd')
element.send_keys(user_pass)
element = find_element_by_id(hoperun_driver, 'wmSubBtn')
element.click()
def hoperun_check_mail(hoperun_driver, mail_sender, mail_title):
wait_for_frame_and_switch_to_frame(hoperun_driver, 'treeBox')
element = find_element_by_id(hoperun_driver, 'tree_folder_1_span')
element.click()
wait_for_page_full_loaded(hoperun_driver)
wait_for_frame_and_switch_to_frame(hoperun_driver, 'tabsHome')
wait_for_page_full_loaded(hoperun_driver)
element = hoperun_driver.find_elements_by_xpath(''.join(('//div[text()="', mail_sender, '"]/../../../..')))
for e in element:
if e.find_element_by_xpath('li[2]/div[3]/span').text.__contains__(mail_title):
e.find_element_by_xpath('li[2]/div[3]/span').click()
def qq_login(qq_driver, user_name, user_pass):
element = find_element_by_id(qq_driver, 'qqLoginTab')
element.click()
qq_driver.switch_to.frame('login_frame')
element = find_element_by_id(qq_driver, 'u')
element.click()
element.send_keys(user_name)
element = find_element_by_id(qq_driver, 'p')
element.click()
element.send_keys(user_pass)
element = find_element_by_id(qq_driver, 'login_button')
element.click()
wait_for_frame_and_switch_to_frame(qq_driver, 'tcaptcha_iframe')
img_element = find_element_by_id(qq_driver, 'slideBg')
wait_for_element_appeared(qq_driver, img_element)
big = img_element.get_attribute('src')
request_download_file_by_url(big, GlobalParam.get_test_image_path() + 'test_qq_mail_big.png')
img_element = find_element_by_id(qq_driver, 'slideBlock')
wait_for_element_appeared(qq_driver, img_element)
small = img_element.get_attribute('src')
request_download_file_by_url(small, GlobalParam.get_test_image_path() + 'test_qq_mail_small.png')
def netcase_163_login(netcase_163_driver, user_name, user_pass):
netcase_login_frame = netcase_163_driver.find_element_by_tag_name('iframe')
wait_for_frame_and_switch_to_frame(netcase_163_driver, netcase_login_frame)
wait_for_element_exist(netcase_163_driver, '//input[@name="email"]')
element = find_element_by_name(netcase_163_driver, 'email')
element.click()
element.send_keys(user_name)
wait_for_element_exist(netcase_163_driver, '//input[@name="password"]')
element = find_element_by_name(netcase_163_driver, 'password')
element.click()
element.send_keys(user_pass)
element = find_element_by_id(netcase_163_driver, 'dologin')
element.click()
# ------------------------security mail captcha not show----------------------
# wait_for_element_exist(netcase_163_driver,'//div[@class="yidun_panel"]')
# element = find_element_by_class_name(netcase_163_driver, 'yidun_panel')
# netcase_163_driver.execute_script("arguments[0].style['display'] = 'block';",element)
# # element = find_element_by_class_name(netcase_163_driver, 'yidun_bg-img')
# # netcase_mail_captcha = element.get_attribute('src')
# # request_download_file_by_url(netcase_mail_captcha, test_image_path+'test_netcase_mail_captcha.png')
# time.sleep(4)
# element = find_element_by_class_name(netcase_163_driver, 'yidun_refresh')
# element.click()
#
# element = find_element_by_class_name(netcase_163_driver, 'yidun_tips__point')
# print(element.location)
#
# # element = find_element_by_class_name(netcase_163_driver, 'yidun_tips__point')
# # print(element.get_attribute("innerHTML"))
# ------------------------security mail captcha not show----------------------
def netcase_163_check_mail(netcase_163_driver, mail_sender, mail_title):
wait_for_element_to_be_clickable(netcase_163_driver, '//div[@id="_mail_component_140_140"]/span[@title="收件箱"]')
time.sleep(2)
# rF0 kw0 nui-txt-flag0 : not read
    # rF0 nui-txt-flag0 : read
# element = netcase_163_driver.find_elements_by_xpath('//div[@class="rF0 nui-txt-flag0"]/div/div[2]/span')
element = netcase_163_driver.find_elements_by_xpath('//div[@class="rF0 nui-txt-flag0"]')
for e in element:
print(e.find_element_by_xpath('.//div/div[2]/span').text)
# if e.text.__contains__(mail_title):
# print(e.text)
def qq_captcha_pass():
big_image = cv.imread(GlobalParam.get_test_image_path() + 'test_qq_mail_big.png')
small_image = cv.imread(GlobalParam.get_test_image_path() + 'test_qq_mail_small.png')
cv.imshow('1', small_image)
cv.waitKey(0)
def netcase_captcha_pass():
return ''
# login hoperun mail and check mail
# hoperun_login(mail_driver, 'user', 'password')
# wait_for_page_full_loaded(mail_driver)
# hoperun_check_mail(mail_driver, 'sender', 'title')
netcase_163_login(mail_driver, '****', '****')
wait_for_page_full_loaded(mail_driver)
netcase_163_check_mail(mail_driver, '', '123')
# qq_login(mail_driver, '', '')
# netcase_163_login(mail_driver, '', '')
# captcha_pass()
| true
| true
|
7906f187e6c2c173257b7915f6ff0719f60b38d1
| 7,832
|
py
|
Python
|
tests/test_gateway_mqtt.py
|
jslove/pymysensors
|
0555397b2985a0d69bf3c5d615001aaea2d79b89
|
[
"MIT"
] | null | null | null |
tests/test_gateway_mqtt.py
|
jslove/pymysensors
|
0555397b2985a0d69bf3c5d615001aaea2d79b89
|
[
"MIT"
] | null | null | null |
tests/test_gateway_mqtt.py
|
jslove/pymysensors
|
0555397b2985a0d69bf3c5d615001aaea2d79b89
|
[
"MIT"
] | null | null | null |
"""Test mysensors MQTT gateway with unittest."""
import os
import tempfile
import time
from unittest import TestCase, main, mock
from mysensors import ChildSensor, Sensor
from mysensors.gateway_mqtt import MQTTGateway
class TestMQTTGateway(TestCase):
"""Test the MQTT Gateway."""
def setUp(self):
"""Set up gateway."""
self.mock_pub = mock.Mock()
self.mock_sub = mock.Mock()
self.gateway = MQTTGateway(self.mock_pub, self.mock_sub)
def tearDown(self):
"""Stop MQTTGateway if alive."""
if self.gateway.is_alive():
self.gateway.stop()
def _add_sensor(self, sensorid):
"""Add sensor node. Return sensor node instance."""
self.gateway.sensors[sensorid] = Sensor(sensorid)
return self.gateway.sensors[sensorid]
def test_send(self):
"""Test send method."""
self.gateway.send('1;1;1;0;1;20\n')
self.mock_pub.assert_called_with('/1/1/1/0/1', '20', 0, True)
def test_send_empty_string(self):
"""Test send method with empty string."""
self.gateway.send('')
self.assertFalse(self.mock_pub.called)
def test_send_error(self):
"""Test send method with error on publish."""
self.mock_pub.side_effect = ValueError(
'Publish topic cannot contain wildcards.')
with self.assertLogs(level='ERROR') as test_handle:
self.gateway.send('1;1;1;0;1;20\n')
self.mock_pub.assert_called_with('/1/1/1/0/1', '20', 0, True)
self.assertEqual(
# only check first line of error log
test_handle.output[0].split('\n', 1)[0],
'ERROR:mysensors.gateway_mqtt:Publish to /1/1/1/0/1 failed: '
'Publish topic cannot contain wildcards.')
def test_recv(self):
"""Test recv method."""
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('/1/1/2/0/1', '', 0)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;0;1;20\n')
self.gateway.recv('/1/1/2/0/1', '', 1)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;1;1;20\n')
def test_recv_wrong_prefix(self):
"""Test recv method with wrong topic prefix."""
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('wrong/1/1/2/0/1', '', 0)
ret = self.gateway.handle_queue()
self.assertEqual(ret, None)
def test_presentation(self):
"""Test handle presentation message."""
self._add_sensor(1)
self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
calls = [
mock.call('/1/1/1/+/+', self.gateway.recv, 0),
mock.call('/1/1/2/+/+', self.gateway.recv, 0),
mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
def test_presentation_no_sensor(self):
"""Test handle presentation message without sensor."""
self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
self.assertFalse(self.mock_sub.called)
def test_subscribe_error(self):
"""Test subscribe throws error."""
self._add_sensor(1)
self.mock_sub.side_effect = ValueError(
'No topic specified, or incorrect topic type.')
with self.assertLogs(level='ERROR') as test_handle:
self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
calls = [
mock.call('/1/1/1/+/+', self.gateway.recv, 0),
mock.call('/1/1/2/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
self.assertEqual(
# only check first line of error log
test_handle.output[0].split('\n', 1)[0],
'ERROR:mysensors.gateway_mqtt:Subscribe to /1/1/1/+/+ failed: '
'No topic specified, or incorrect topic type.')
def test_start_stop_gateway(self):
"""Test start and stop of MQTT gateway."""
self.assertFalse(self.gateway.is_alive())
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('/1/1/2/0/1', '', 0)
self.gateway.recv('/1/1/1/0/1', '30', 0)
self.gateway.recv('/1/1/2/0/1', '', 0)
self.gateway.start()
self.assertTrue(self.gateway.is_alive())
calls = [
mock.call('/+/+/0/+/+', self.gateway.recv, 0),
mock.call('/+/+/3/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
time.sleep(0.05)
calls = [
mock.call('/1/1/1/0/1', '20', 0, True),
mock.call('/1/1/1/0/1', '30', 0, True)]
self.mock_pub.assert_has_calls(calls)
self.gateway.stop()
self.gateway.join(timeout=0.5)
self.assertFalse(self.gateway.is_alive())
def test_mqtt_load_persistence(self):
"""Test load persistence file for MQTTGateway."""
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
with tempfile.TemporaryDirectory() as temp_dir:
self.gateway.persistence_file = os.path.join(temp_dir, 'file.json')
# pylint: disable=protected-access
self.gateway._save_sensors()
del self.gateway.sensors[1]
self.assertNotIn(1, self.gateway.sensors)
self.gateway._safe_load_sensors()
self.assertEqual(
self.gateway.sensors[1].children[1].id,
sensor.children[1].id)
self.assertEqual(
self.gateway.sensors[1].children[1].type,
sensor.children[1].type)
self.assertEqual(
self.gateway.sensors[1].children[1].values,
sensor.children[1].values)
calls = [
mock.call('/1/1/1/+/+', self.gateway.recv, 0),
mock.call('/1/1/2/+/+', self.gateway.recv, 0),
mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
class TestMQTTGatewayCustomPrefix(TestCase):
"""Test the MQTT Gateway with custom topic prefix."""
def setUp(self):
"""Set up test."""
self.mock_pub = mock.Mock()
self.mock_sub = mock.Mock()
self.gateway = None
def _setup(self, in_prefix, out_prefix):
"""Set up gateway."""
self.gateway = MQTTGateway(
self.mock_pub, self.mock_sub, in_prefix=in_prefix,
out_prefix=out_prefix)
def _add_sensor(self, sensorid):
"""Add sensor node. Return sensor node instance."""
self.gateway.sensors[sensorid] = Sensor(sensorid)
return self.gateway.sensors[sensorid]
def test_nested_prefix(self):
"""Test recv method with nested topic prefix."""
self._setup('test/test-in', 'test/test-out')
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('test/test-in/1/1/2/0/1', '', 0)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;0;1;20\n')
self.gateway.recv('test/test-in/1/1/2/0/1', '', 1)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;1;1;20\n')
if __name__ == '__main__':
main()
| 39.16
| 79
| 0.598698
|
import os
import tempfile
import time
from unittest import TestCase, main, mock
from mysensors import ChildSensor, Sensor
from mysensors.gateway_mqtt import MQTTGateway
class TestMQTTGateway(TestCase):
def setUp(self):
self.mock_pub = mock.Mock()
self.mock_sub = mock.Mock()
self.gateway = MQTTGateway(self.mock_pub, self.mock_sub)
def tearDown(self):
if self.gateway.is_alive():
self.gateway.stop()
def _add_sensor(self, sensorid):
self.gateway.sensors[sensorid] = Sensor(sensorid)
return self.gateway.sensors[sensorid]
def test_send(self):
self.gateway.send('1;1;1;0;1;20\n')
self.mock_pub.assert_called_with('/1/1/1/0/1', '20', 0, True)
def test_send_empty_string(self):
self.gateway.send('')
self.assertFalse(self.mock_pub.called)
def test_send_error(self):
self.mock_pub.side_effect = ValueError(
'Publish topic cannot contain wildcards.')
with self.assertLogs(level='ERROR') as test_handle:
self.gateway.send('1;1;1;0;1;20\n')
self.mock_pub.assert_called_with('/1/1/1/0/1', '20', 0, True)
self.assertEqual(
test_handle.output[0].split('\n', 1)[0],
'ERROR:mysensors.gateway_mqtt:Publish to /1/1/1/0/1 failed: '
'Publish topic cannot contain wildcards.')
def test_recv(self):
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('/1/1/2/0/1', '', 0)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;0;1;20\n')
self.gateway.recv('/1/1/2/0/1', '', 1)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;1;1;20\n')
def test_recv_wrong_prefix(self):
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('wrong/1/1/2/0/1', '', 0)
ret = self.gateway.handle_queue()
self.assertEqual(ret, None)
def test_presentation(self):
self._add_sensor(1)
self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
calls = [
mock.call('/1/1/1/+/+', self.gateway.recv, 0),
mock.call('/1/1/2/+/+', self.gateway.recv, 0),
mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
def test_presentation_no_sensor(self):
self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
self.assertFalse(self.mock_sub.called)
def test_subscribe_error(self):
self._add_sensor(1)
self.mock_sub.side_effect = ValueError(
'No topic specified, or incorrect topic type.')
with self.assertLogs(level='ERROR') as test_handle:
self.gateway.logic('1;1;0;0;7;Humidity Sensor\n')
calls = [
mock.call('/1/1/1/+/+', self.gateway.recv, 0),
mock.call('/1/1/2/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
self.assertEqual(
test_handle.output[0].split('\n', 1)[0],
'ERROR:mysensors.gateway_mqtt:Subscribe to /1/1/1/+/+ failed: '
'No topic specified, or incorrect topic type.')
def test_start_stop_gateway(self):
self.assertFalse(self.gateway.is_alive())
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('/1/1/2/0/1', '', 0)
self.gateway.recv('/1/1/1/0/1', '30', 0)
self.gateway.recv('/1/1/2/0/1', '', 0)
self.gateway.start()
self.assertTrue(self.gateway.is_alive())
calls = [
mock.call('/+/+/0/+/+', self.gateway.recv, 0),
mock.call('/+/+/3/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
time.sleep(0.05)
calls = [
mock.call('/1/1/1/0/1', '20', 0, True),
mock.call('/1/1/1/0/1', '30', 0, True)]
self.mock_pub.assert_has_calls(calls)
self.gateway.stop()
self.gateway.join(timeout=0.5)
self.assertFalse(self.gateway.is_alive())
def test_mqtt_load_persistence(self):
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
with tempfile.TemporaryDirectory() as temp_dir:
self.gateway.persistence_file = os.path.join(temp_dir, 'file.json')
self.gateway._save_sensors()
del self.gateway.sensors[1]
self.assertNotIn(1, self.gateway.sensors)
self.gateway._safe_load_sensors()
self.assertEqual(
self.gateway.sensors[1].children[1].id,
sensor.children[1].id)
self.assertEqual(
self.gateway.sensors[1].children[1].type,
sensor.children[1].type)
self.assertEqual(
self.gateway.sensors[1].children[1].values,
sensor.children[1].values)
calls = [
mock.call('/1/1/1/+/+', self.gateway.recv, 0),
mock.call('/1/1/2/+/+', self.gateway.recv, 0),
mock.call('/1/+/4/+/+', self.gateway.recv, 0)]
self.mock_sub.assert_has_calls(calls)
class TestMQTTGatewayCustomPrefix(TestCase):
def setUp(self):
self.mock_pub = mock.Mock()
self.mock_sub = mock.Mock()
self.gateway = None
def _setup(self, in_prefix, out_prefix):
self.gateway = MQTTGateway(
self.mock_pub, self.mock_sub, in_prefix=in_prefix,
out_prefix=out_prefix)
def _add_sensor(self, sensorid):
self.gateway.sensors[sensorid] = Sensor(sensorid)
return self.gateway.sensors[sensorid]
def test_nested_prefix(self):
self._setup('test/test-in', 'test/test-out')
sensor = self._add_sensor(1)
sensor.children[1] = ChildSensor(
1, self.gateway.const.Presentation.S_HUM)
sensor.children[1].values[self.gateway.const.SetReq.V_HUM] = '20'
self.gateway.recv('test/test-in/1/1/2/0/1', '', 0)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;0;1;20\n')
self.gateway.recv('test/test-in/1/1/2/0/1', '', 1)
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;1;1;1;1;20\n')
if __name__ == '__main__':
main()
| true
| true
|
7906f2f1ceef71512cbb67e6506c799f2b8f2b1a
| 7,195
|
py
|
Python
|
backend/webserver/api/annotator.py
|
mgarbade/coco-annotator
|
44bfabde0dde140c83a45fc52cc590f2a792f7b3
|
[
"MIT"
] | null | null | null |
backend/webserver/api/annotator.py
|
mgarbade/coco-annotator
|
44bfabde0dde140c83a45fc52cc590f2a792f7b3
|
[
"MIT"
] | 27
|
2019-10-24T05:44:46.000Z
|
2020-11-26T07:29:26.000Z
|
backend/webserver/api/annotator.py
|
mgarbade/coco-annotator
|
44bfabde0dde140c83a45fc52cc590f2a792f7b3
|
[
"MIT"
] | 1
|
2019-10-10T02:34:14.000Z
|
2019-10-10T02:34:14.000Z
|
import datetime
from flask_restplus import Namespace, Resource
from flask_login import login_required, current_user
from flask import request
from ..util import query_util, coco_util, profile
from config import Config
from database import (
ImageModel,
CategoryModel,
AnnotationModel,
SessionEvent
)
api = Namespace('annotator', description='Annotator related operations')
@api.route('/data')
class AnnotatorData(Resource):
@profile
@login_required
def post(self):
"""
Called when saving data from the annotator client
"""
data = request.get_json(force=True)
image = data.get('image')
dataset = data.get('dataset')
image_id = image.get('id')
image_model = ImageModel.objects(id=image_id).first()
if image_model is None:
return {'success': False, 'message': 'Image does not exist'}, 400
# Check if current user can access dataset
db_dataset = current_user.datasets.filter(id=image_model.dataset_id).first()
if dataset is None:
return {'success': False, 'message': 'Could not find associated dataset'}
db_dataset.update(annotate_url=dataset.get('annotate_url', ''))
categories = CategoryModel.objects.all()
annotations = AnnotationModel.objects(image_id=image_id)
current_user.update(preferences=data.get('user', {}))
annotated = False
# Iterate every category passed in the data
for category in data.get('categories', []):
category_id = category.get('id')
# Find corresponding category object in the database
db_category = categories.filter(id=category_id).first()
if db_category is None:
continue
category_update = {'color': category.get('color')}
if current_user.can_edit(db_category):
category_update['keypoint_edges'] = category.get('keypoint_edges', [])
category_update['keypoint_labels'] = category.get('keypoint_labels', [])
db_category.update(**category_update)
# Iterate every annotation from the data annotations
for annotation in category.get('annotations', []):
# Find corresponding annotation object in database
annotation_id = annotation.get('id')
db_annotation = annotations.filter(id=annotation_id).first()
if db_annotation is None:
continue
                # Paperjs objects are complex, so they will not always be passed. Therefore we update
                # the annotation twice, checking whether the paperjs object exists.
# Update annotation in database
sessions = []
total_time = 0
for session in annotation.get('sessions', []):
date = datetime.datetime.fromtimestamp(int(session.get('start')) / 1e3)
model = SessionEvent(
user=current_user.username,
created_at=date,
milliseconds=session.get('milliseconds'),
tools_used=session.get('tools')
)
total_time += session.get('milliseconds')
sessions.append(model)
db_annotation.update(
add_to_set__events=sessions,
inc__milliseconds=total_time,
set__isbbox=annotation.get('isbbox', False),
set__keypoints=annotation.get('keypoints', []),
set__metadata=annotation.get('metadata'),
set__color=annotation.get('color')
)
paperjs_object = annotation.get('compoundPath', [])
# Update paperjs if it exists
if len(paperjs_object) == 2:
width = db_annotation.width
height = db_annotation.height
# Generate coco formatted segmentation data
segmentation, area, bbox = coco_util.\
paperjs_to_coco(width, height, paperjs_object)
db_annotation.update(
set__segmentation=segmentation,
set__area=area,
set__isbbox=annotation.get('isbbox', False),
set__bbox=bbox,
set__paper_object=paperjs_object,
)
if area > 0:
annotated = True
image_model.update(
set__metadata=image.get('metadata', {}),
set__annotated=annotated,
set__category_ids=image.get('category_ids', []),
set__regenerate_thumbnail=True,
set__num_annotations=annotations\
.filter(deleted=False, area__gt=0).count()
)
return {"success": True}
@api.route('/data/<int:image_id>')
class AnnotatorId(Resource):
@profile
@login_required
def get(self, image_id):
""" Called when loading from the annotator client """
image = ImageModel.objects(id=image_id)\
.exclude('events').first()
if image is None:
return {'success': False, 'message': 'Could not load image'}, 400
dataset = current_user.datasets.filter(id=image.dataset_id).first()
if dataset is None:
return {'success': False, 'message': 'Could not find associated dataset'}, 400
categories = CategoryModel.objects(deleted=False)\
.in_bulk(dataset.categories).items()
# Get next and previous image
images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
pre = images.filter(file_name__lt=image.file_name).order_by('-file_name').first()
nex = images.filter(file_name__gt=image.file_name).order_by('file_name').first()
preferences = {}
if not Config.LOGIN_DISABLED:
preferences = current_user.preferences
# Generate data about the image to return to client
data = {
'image': query_util.fix_ids(image),
'categories': [],
'dataset': query_util.fix_ids(dataset),
'preferences': preferences,
'permissions': {
'dataset': dataset.permissions(current_user),
'image': image.permissions(current_user)
}
}
data['image']['previous'] = pre.id if pre else None
data['image']['next'] = nex.id if nex else None
for category in categories:
category = query_util.fix_ids(category[1])
category_id = category.get('id')
annotations = AnnotationModel.objects(image_id=image_id, category_id=category_id, deleted=False)\
.exclude('events').all()
category['show'] = True
category['visualize'] = False
category['annotations'] = [] if annotations is None else query_util.fix_ids(annotations)
data.get('categories').append(category)
return data
| 36.338384
| 109
| 0.577623
|
import datetime
from flask_restplus import Namespace, Resource
from flask_login import login_required, current_user
from flask import request
from ..util import query_util, coco_util, profile
from config import Config
from database import (
ImageModel,
CategoryModel,
AnnotationModel,
SessionEvent
)
api = Namespace('annotator', description='Annotator related operations')
@api.route('/data')
class AnnotatorData(Resource):
@profile
@login_required
def post(self):
data = request.get_json(force=True)
image = data.get('image')
dataset = data.get('dataset')
image_id = image.get('id')
image_model = ImageModel.objects(id=image_id).first()
if image_model is None:
return {'success': False, 'message': 'Image does not exist'}, 400
db_dataset = current_user.datasets.filter(id=image_model.dataset_id).first()
if dataset is None:
return {'success': False, 'message': 'Could not find associated dataset'}
db_dataset.update(annotate_url=dataset.get('annotate_url', ''))
categories = CategoryModel.objects.all()
annotations = AnnotationModel.objects(image_id=image_id)
current_user.update(preferences=data.get('user', {}))
annotated = False
for category in data.get('categories', []):
category_id = category.get('id')
db_category = categories.filter(id=category_id).first()
if db_category is None:
continue
category_update = {'color': category.get('color')}
if current_user.can_edit(db_category):
category_update['keypoint_edges'] = category.get('keypoint_edges', [])
category_update['keypoint_labels'] = category.get('keypoint_labels', [])
db_category.update(**category_update)
for annotation in category.get('annotations', []):
annotation_id = annotation.get('id')
db_annotation = annotations.filter(id=annotation_id).first()
if db_annotation is None:
continue
sessions = []
total_time = 0
for session in annotation.get('sessions', []):
date = datetime.datetime.fromtimestamp(int(session.get('start')) / 1e3)
model = SessionEvent(
user=current_user.username,
created_at=date,
milliseconds=session.get('milliseconds'),
tools_used=session.get('tools')
)
total_time += session.get('milliseconds')
sessions.append(model)
db_annotation.update(
add_to_set__events=sessions,
inc__milliseconds=total_time,
set__isbbox=annotation.get('isbbox', False),
set__keypoints=annotation.get('keypoints', []),
set__metadata=annotation.get('metadata'),
set__color=annotation.get('color')
)
paperjs_object = annotation.get('compoundPath', [])
if len(paperjs_object) == 2:
width = db_annotation.width
height = db_annotation.height
segmentation, area, bbox = coco_util.\
paperjs_to_coco(width, height, paperjs_object)
db_annotation.update(
set__segmentation=segmentation,
set__area=area,
set__isbbox=annotation.get('isbbox', False),
set__bbox=bbox,
set__paper_object=paperjs_object,
)
if area > 0:
annotated = True
image_model.update(
set__metadata=image.get('metadata', {}),
set__annotated=annotated,
set__category_ids=image.get('category_ids', []),
set__regenerate_thumbnail=True,
set__num_annotations=annotations\
.filter(deleted=False, area__gt=0).count()
)
return {"success": True}
@api.route('/data/<int:image_id>')
class AnnotatorId(Resource):
@profile
@login_required
def get(self, image_id):
image = ImageModel.objects(id=image_id)\
.exclude('events').first()
if image is None:
return {'success': False, 'message': 'Could not load image'}, 400
dataset = current_user.datasets.filter(id=image.dataset_id).first()
if dataset is None:
return {'success': False, 'message': 'Could not find associated dataset'}, 400
categories = CategoryModel.objects(deleted=False)\
.in_bulk(dataset.categories).items()
images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
pre = images.filter(file_name__lt=image.file_name).order_by('-file_name').first()
nex = images.filter(file_name__gt=image.file_name).order_by('file_name').first()
preferences = {}
if not Config.LOGIN_DISABLED:
preferences = current_user.preferences
data = {
'image': query_util.fix_ids(image),
'categories': [],
'dataset': query_util.fix_ids(dataset),
'preferences': preferences,
'permissions': {
'dataset': dataset.permissions(current_user),
'image': image.permissions(current_user)
}
}
data['image']['previous'] = pre.id if pre else None
data['image']['next'] = nex.id if nex else None
for category in categories:
category = query_util.fix_ids(category[1])
category_id = category.get('id')
annotations = AnnotationModel.objects(image_id=image_id, category_id=category_id, deleted=False)\
.exclude('events').all()
category['show'] = True
category['visualize'] = False
category['annotations'] = [] if annotations is None else query_util.fix_ids(annotations)
data.get('categories').append(category)
return data
| true
| true
|
7906f46b5d41b0f95f4cb238f5651534fc506d06
| 32,803
|
py
|
Python
|
src/train_softmax.py
|
govindjeevan/facenet
|
70a7ee5c5836bc8a31935250eb2d9e818ebf1f2d
|
[
"MIT"
] | null | null | null |
src/train_softmax.py
|
govindjeevan/facenet
|
70a7ee5c5836bc8a31935250eb2d9e818ebf1f2d
|
[
"MIT"
] | null | null | null |
src/train_softmax.py
|
govindjeevan/facenet
|
70a7ee5c5836bc8a31935250eb2d9e818ebf1f2d
|
[
"MIT"
] | null | null | null |
"""Training a face recognizer with TensorFlow using softmax cross entropy loss
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import sys
import random
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import lfw
import h5py
import math
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
def main(args):
network = importlib.import_module(args.model_def)
image_size = (args.image_size, args.image_size)
subdir = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-softmax-'+args.model_def.split(".")[-1]+"-"+args.data_dir.split("/")[-1])
log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist
os.makedirs(log_dir)
model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist
os.makedirs(model_dir)
stat_file_name = os.path.join(log_dir, 'stat.h5')
# Write arguments to a text file
facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
np.random.seed(seed=args.seed)
random.seed(args.seed)
dataset = facenet.get_dataset(args.data_dir)
if args.filter_filename:
dataset = filter_dataset(dataset, os.path.expanduser(args.filter_filename),
args.filter_percentile, args.filter_min_nrof_images_per_class)
if args.validation_set_split_ratio>0.0:
train_set, val_set = facenet.split_dataset(dataset, args.validation_set_split_ratio, args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
else:
train_set, val_set = dataset, []
nrof_classes = len(train_set)
print('Model directory: %s' % model_dir)
print('Log directory: %s' % log_dir)
pretrained_model = None
if args.pretrained_model:
pretrained_model = os.path.expanduser(args.pretrained_model)
print('Pre-trained model: %s' % pretrained_model)
if args.lfw_dir:
print('LFW directory: %s' % args.lfw_dir)
# Read the file containing the pairs used for testing
pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
# Get the paths for the corresponding images
lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
global_step = tf.Variable(0, trainable=False)
# Get a list of image paths and their labels
image_list, label_list = facenet.get_image_paths_and_labels(train_set)
assert len(image_list)>0, 'The training set should not be empty'
val_image_list, val_label_list = facenet.get_image_paths_and_labels(val_set)
# Create a queue that produces indices into the image_list and label_list
labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
range_size = array_ops.shape(labels)[0]
index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
shuffle=True, seed=None, capacity=32)
index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
nrof_preprocess_threads = 4
input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
dtypes=[tf.string, tf.int32, tf.int32],
shapes=[(1,), (1,), (1,)],
shared_name=None, name=None)
enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='enqueue_op')
image_batch, label_batch = facenet.create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
image_batch = tf.identity(image_batch, 'image_batch')
image_batch = tf.identity(image_batch, 'input')
label_batch = tf.identity(label_batch, 'label_batch')
print('Number of classes in training set: %d' % nrof_classes)
print('Number of examples in training set: %d' % len(image_list))
print('Number of classes in validation set: %d' % len(val_set))
print('Number of examples in validation set: %d' % len(val_image_list))
print('Building training graph')
# Build the inference graph
prelogits, _ = network.inference(image_batch, args.keep_probability,
phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
weight_decay=args.weight_decay)
logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
weights_initializer=slim.initializers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(args.weight_decay),
scope='Logits', reuse=False)
embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
# Norm for the prelogits
eps = 1e-4
prelogits_norm = tf.reduce_mean(tf.norm(tf.abs(prelogits)+eps, ord=args.prelogits_norm_p, axis=1))
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_norm * args.prelogits_norm_loss_factor)
# Add center loss
prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
# Calculate the average cross entropy loss across the batch
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_batch, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)), tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
# Calculate the total losses
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
# Build a Graph that trains the model with one batch of examples and updates the model parameters
train_op = facenet.train(total_loss, global_step, args.optimizer,
learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
# Create a saver
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Start running operations on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
with sess.as_default():
if pretrained_model:
print('Restoring pretrained model: %s' % pretrained_model)
ckpt = tf.train.get_checkpoint_state(pretrained_model)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
# Training and validation loop
print('Running training')
nrof_steps = args.max_nrof_epochs*args.epoch_size
nrof_val_samples = int(math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs)) # Validate every validate_every_n_epochs as well as in the last epoch
stat = {
'loss': np.zeros((nrof_steps,), np.float32),
'center_loss': np.zeros((nrof_steps,), np.float32),
'reg_loss': np.zeros((nrof_steps,), np.float32),
'xent_loss': np.zeros((nrof_steps,), np.float32),
'prelogits_norm': np.zeros((nrof_steps,), np.float32),
'accuracy': np.zeros((nrof_steps,), np.float32),
'val_loss': np.zeros((nrof_val_samples,), np.float32),
'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
'lfw_valrate2': np.zeros((args.max_nrof_epochs,), np.float32),
'lfw_valrate3': np.zeros((args.max_nrof_epochs,), np.float32),
'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
}
for epoch in range(1,args.max_nrof_epochs+1):
step = sess.run(global_step, feed_dict=None)
# Train for one epoch
t = time.time()
cont = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, global_step,
total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
stat, cross_entropy_mean, accuracy, learning_rate,
prelogits, prelogits_center_loss, args.random_rotate, args.random_crop, args.random_flip, prelogits_norm, args.prelogits_hist_max, args.use_fixed_image_standardization)
stat['time_train'][epoch-1] = time.time() - t
if not cont:
break
t = time.time()
if len(val_image_list)>0 and ((epoch-1) % args.validate_every_n_epochs == args.validate_every_n_epochs-1 or epoch==args.max_nrof_epochs):
validate(args, sess, epoch, val_image_list, val_label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
phase_train_placeholder, batch_size_placeholder,
stat, total_loss, regularization_losses, cross_entropy_mean, accuracy, args.validate_every_n_epochs, args.use_fixed_image_standardization)
stat['time_validate'][epoch-1] = time.time() - t
# Save variables and the metagraph if it doesn't exist already
save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, epoch)
# Evaluate on LFW
t = time.time()
if args.lfw_dir:
evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer, stat, epoch,
args.lfw_distance_metric, args.lfw_subtract_mean, args.lfw_use_flipped_images, args.use_fixed_image_standardization)
stat['time_evaluate'][epoch-1] = time.time() - t
print('Saving statistics')
with h5py.File(stat_file_name, 'w') as f:
for key, value in stat.items():
f.create_dataset(key, data=value)
return model_dir
def find_threshold(var, percentile):
hist, bin_edges = np.histogram(var, 100)
cdf = np.float32(np.cumsum(hist)) / np.sum(hist)
bin_centers = (bin_edges[:-1]+bin_edges[1:])/2
#plt.plot(bin_centers, cdf)
threshold = np.interp(percentile*0.01, cdf, bin_centers)
return threshold
def filter_dataset(dataset, data_filename, percentile, min_nrof_images_per_class):
with h5py.File(data_filename,'r') as f:
distance_to_center = np.array(f.get('distance_to_center'))
label_list = np.array(f.get('label_list'))
image_list = np.array(f.get('image_list'))
distance_to_center_threshold = find_threshold(distance_to_center, percentile)
indices = np.where(distance_to_center>=distance_to_center_threshold)[0]
filtered_dataset = dataset
removelist = []
for i in indices:
label = label_list[i]
image = image_list[i]
if image in filtered_dataset[label].image_paths:
filtered_dataset[label].image_paths.remove(image)
if len(filtered_dataset[label].image_paths)<min_nrof_images_per_class:
removelist.append(label)
ix = sorted(list(set(removelist)), reverse=True)
for i in ix:
del(filtered_dataset[i])
return filtered_dataset
def train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, step,
loss, train_op, summary_op, summary_writer, reg_losses, learning_rate_schedule_file,
stat, cross_entropy_mean, accuracy,
learning_rate, prelogits, prelogits_center_loss, random_rotate, random_crop, random_flip, prelogits_norm, prelogits_hist_max, use_fixed_image_standardization):
batch_number = 0
if args.learning_rate>0.0:
lr = args.learning_rate
else:
lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
if lr<=0:
return False
index_epoch = sess.run(index_dequeue_op)
label_epoch = np.array(label_list)[index_epoch]
image_epoch = np.array(image_list)[index_epoch]
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.array(label_epoch),1)
image_paths_array = np.expand_dims(np.array(image_epoch),1)
control_value = facenet.RANDOM_ROTATE * random_rotate + facenet.RANDOM_CROP * random_crop + facenet.RANDOM_FLIP * random_flip + facenet.FIXED_STANDARDIZATION * use_fixed_image_standardization
control_array = np.ones_like(labels_array) * control_value
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
# Training loop
train_time = 0
while batch_number < args.epoch_size:
start_time = time.time()
feed_dict = {learning_rate_placeholder: lr, phase_train_placeholder:True, batch_size_placeholder:args.batch_size}
tensor_list = [loss, train_op, step, reg_losses, prelogits, cross_entropy_mean, learning_rate, prelogits_norm, accuracy, prelogits_center_loss]
if batch_number % 100 == 0:
loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_, summary_str = sess.run(tensor_list + [summary_op], feed_dict=feed_dict)
summary_writer.add_summary(summary_str, global_step=step_)
else:
loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_ = sess.run(tensor_list, feed_dict=feed_dict)
duration = time.time() - start_time
stat['loss'][step_-1] = loss_
stat['center_loss'][step_-1] = center_loss_
stat['reg_loss'][step_-1] = np.sum(reg_losses_)
stat['xent_loss'][step_-1] = cross_entropy_mean_
stat['prelogits_norm'][step_-1] = prelogits_norm_
stat['learning_rate'][epoch-1] = lr_
stat['accuracy'][step_-1] = accuracy_
stat['prelogits_hist'][epoch-1,:] += np.histogram(np.minimum(np.abs(prelogits_), prelogits_hist_max), bins=1000, range=(0.0, prelogits_hist_max))[0]
duration = time.time() - start_time
print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tXent %2.3f\tRegLoss %2.3f\tAccuracy %2.3f\tLr %2.5f\tCl %2.3f' %
(epoch, batch_number+1, args.epoch_size, duration, loss_, cross_entropy_mean_, np.sum(reg_losses_), accuracy_, lr_, center_loss_))
batch_number += 1
train_time += duration
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/total', simple_value=train_time)
summary_writer.add_summary(summary, global_step=step_)
return True
def validate(args, sess, epoch, image_list, label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
phase_train_placeholder, batch_size_placeholder,
stat, loss, regularization_losses, cross_entropy_mean, accuracy, validate_every_n_epochs, use_fixed_image_standardization):
print('Running forward pass on validation set')
nrof_batches = len(label_list) // args.lfw_batch_size
nrof_images = nrof_batches * args.lfw_batch_size
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.array(label_list[:nrof_images]),1)
image_paths_array = np.expand_dims(np.array(image_list[:nrof_images]),1)
control_array = np.ones_like(labels_array, np.int32)*facenet.FIXED_STANDARDIZATION * use_fixed_image_standardization
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
loss_array = np.zeros((nrof_batches,), np.float32)
xent_array = np.zeros((nrof_batches,), np.float32)
accuracy_array = np.zeros((nrof_batches,), np.float32)
# Training loop
start_time = time.time()
for i in range(nrof_batches):
feed_dict = {phase_train_placeholder:False, batch_size_placeholder:args.lfw_batch_size}
loss_, cross_entropy_mean_, accuracy_ = sess.run([loss, cross_entropy_mean, accuracy], feed_dict=feed_dict)
loss_array[i], xent_array[i], accuracy_array[i] = (loss_, cross_entropy_mean_, accuracy_)
if i % 10 == 9:
print('.', end='')
sys.stdout.flush()
print('')
duration = time.time() - start_time
val_index = (epoch-1)//validate_every_n_epochs
stat['val_loss'][val_index] = np.mean(loss_array)
stat['val_xent_loss'][val_index] = np.mean(xent_array)
stat['val_accuracy'][val_index] = np.mean(accuracy_array)
print('Validation Epoch: %d\tTime %.3f\tLoss %2.3f\tXent %2.3f\tAccuracy %2.3f' %
(epoch, duration, np.mean(loss_array), np.mean(xent_array), np.mean(accuracy_array)))
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds, log_dir, step, summary_writer, stat, epoch, distance_metric, subtract_mean, use_flipped_images, use_fixed_image_standardization):
start_time = time.time()
# Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')
# Enqueue one epoch of image paths and labels
nrof_embeddings = len(actual_issame)*2 # nrof_pairs * nrof_images_per_pair
nrof_flips = 2 if use_flipped_images else 1
nrof_images = nrof_embeddings * nrof_flips
labels_array = np.expand_dims(np.arange(0,nrof_images),1)
image_paths_array = np.expand_dims(np.repeat(np.array(image_paths),nrof_flips),1)
control_array = np.zeros_like(labels_array, np.int32)
if use_fixed_image_standardization:
control_array += np.ones_like(labels_array)*facenet.FIXED_STANDARDIZATION
if use_flipped_images:
# Flip every second image
control_array += (labels_array % 2)*facenet.FLIP
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
embedding_size = int(embeddings.get_shape()[1])
assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
nrof_batches = nrof_images // batch_size
emb_array = np.zeros((nrof_images, embedding_size))
lab_array = np.zeros((nrof_images,))
for i in range(nrof_batches):
feed_dict = {phase_train_placeholder:False, batch_size_placeholder:batch_size}
emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
lab_array[lab] = lab
emb_array[lab, :] = emb
if i % 10 == 9:
print('.', end='')
sys.stdout.flush()
print('')
embeddings = np.zeros((nrof_embeddings, embedding_size*nrof_flips))
if use_flipped_images:
# Concatenate embeddings for flipped and non flipped version of the images
embeddings[:,:embedding_size] = emb_array[0::2,:]
embeddings[:,embedding_size:] = emb_array[1::2,:]
else:
embeddings = emb_array
assert np.array_equal(lab_array, np.arange(nrof_images))==True, 'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
_, _, accuracy, val2, val_std2, far2, val3, val_std3, far3 = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean)
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val2, val_std2, far2))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val3, val_std3, far3))
lfw_time = time.time() - start_time
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
summary.value.add(tag='lfw/val_rate2', simple_value=val2)
summary.value.add(tag='lfw/val_rate3', simple_value=val3)
summary.value.add(tag='time/lfw', simple_value=lfw_time)
summary_writer.add_summary(summary, step)
with open(os.path.join(log_dir,'lfw_result.txt'),'at') as f:
f.write('%d\t%.5f\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val2, val3))
stat['lfw_accuracy'][epoch-1] = np.mean(accuracy)
stat['lfw_valrate2'][epoch-1] = val2
stat['lfw_valrate3'][epoch-1] = val3
def save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):
# Save the model checkpoint
print('Saving variables')
start_time = time.time()
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)
saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
save_time_variables = time.time() - start_time
print('Variables saved in %.2f seconds' % save_time_variables)
metagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)
save_time_metagraph = 0
if not os.path.exists(metagraph_filename):
print('Saving metagraph')
start_time = time.time()
saver.export_meta_graph(metagraph_filename)
save_time_metagraph = time.time() - start_time
print('Metagraph saved in %.2f seconds' % save_time_metagraph)
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/save_variables', simple_value=save_time_variables)
summary.value.add(tag='time/save_metagraph', simple_value=save_time_metagraph)
summary_writer.add_summary(summary, step)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.', default='~/logs/facenet')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.', default='~/models/facenet')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches.',
default='~/datasets/casia/casia_maxpy_mtcnnalign_182_160')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module containing the definition of the inference graph.', default='models.inception_resnet_v1')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=500)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=90)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=3860)
parser.add_argument('--embedding_size', type=int,
help='Dimensionality of the embedding.', default=128)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--random_rotate',
help='Performs random rotations of training images.', action='store_true')
parser.add_argument('--use_fixed_image_standardization',
help='Performs fixed standardization of images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=0.0)
parser.add_argument('--center_loss_factor', type=float,
help='Center loss factor.', default=0.0)
parser.add_argument('--center_loss_alfa', type=float,
help='Center update rate for center loss.', default=0.95)
parser.add_argument('--prelogits_norm_loss_factor', type=float,
help='Loss based on the norm of the activations in the prelogits layer.', default=0.0)
parser.add_argument('--prelogits_norm_p', type=float,
help='Norm to use for prelogits norm loss.', default=1.0)
parser.add_argument('--prelogits_hist_max', type=float,
help='The max value for the prelogits histogram.', default=10.0)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=100)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=1.0)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--nrof_preprocess_threads', type=int,
help='Number of preprocessing (data loading and augmentation) threads.', default=4)
parser.add_argument('--log_histograms',
help='Enables logging of weight/bias histograms in tensorboard.', action='store_true')
parser.add_argument('--learning_rate_schedule_file', type=str,
        help='File containing the learning rate schedule that is used when learning_rate is set to -1.', default='data/learning_rate_schedule.txt')
parser.add_argument('--filter_filename', type=str,
help='File containing image data used for dataset filtering', default='')
parser.add_argument('--filter_percentile', type=float,
help='Keep only the percentile images closed to its class center', default=100.0)
parser.add_argument('--filter_min_nrof_images_per_class', type=int,
help='Keep only the classes with this number of examples or more', default=0)
parser.add_argument('--validate_every_n_epochs', type=int,
help='Number of epoch between validation', default=5)
parser.add_argument('--validation_set_split_ratio', type=float,
help='The ratio of the total dataset to use for validation', default=0.0)
parser.add_argument('--min_nrof_val_images_per_class', type=float,
help='Classes with fewer images will be removed from the validation set', default=0)
# Parameters for validation on LFW
parser.add_argument('--lfw_pairs', type=str,
help='The file containing the pairs to use for validation.', default='data/pairs.txt')
parser.add_argument('--lfw_dir', type=str,
help='Path to the data directory containing aligned face patches.', default='')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
parser.add_argument('--lfw_distance_metric', type=int,
        help='Type of distance metric to use. 0: Euclidean, 1: Cosine similarity distance.', default=0)
parser.add_argument('--lfw_use_flipped_images',
help='Concatenates embeddings for the image and its horizontally flipped counterpart.', action='store_true')
parser.add_argument('--lfw_subtract_mean',
help='Subtract feature mean before calculating distance.', action='store_true')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| 55.692699
| 209
| 0.698137
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import sys
import random
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import lfw
import h5py
import math
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
def main(args):
network = importlib.import_module(args.model_def)
image_size = (args.image_size, args.image_size)
subdir = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-softmax-'+args.model_def.split(".")[-1]+"-"+args.data_dir.split("/")[-1])
log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist
os.makedirs(model_dir)
stat_file_name = os.path.join(log_dir, 'stat.h5')
facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
np.random.seed(seed=args.seed)
random.seed(args.seed)
dataset = facenet.get_dataset(args.data_dir)
if args.filter_filename:
dataset = filter_dataset(dataset, os.path.expanduser(args.filter_filename),
args.filter_percentile, args.filter_min_nrof_images_per_class)
if args.validation_set_split_ratio>0.0:
train_set, val_set = facenet.split_dataset(dataset, args.validation_set_split_ratio, args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
else:
train_set, val_set = dataset, []
nrof_classes = len(train_set)
print('Model directory: %s' % model_dir)
print('Log directory: %s' % log_dir)
pretrained_model = None
if args.pretrained_model:
pretrained_model = os.path.expanduser(args.pretrained_model)
print('Pre-trained model: %s' % pretrained_model)
if args.lfw_dir:
print('LFW directory: %s' % args.lfw_dir)
pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
global_step = tf.Variable(0, trainable=False)
image_list, label_list = facenet.get_image_paths_and_labels(train_set)
assert len(image_list)>0, 'The training set should not be empty'
val_image_list, val_label_list = facenet.get_image_paths_and_labels(val_set)
labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
range_size = array_ops.shape(labels)[0]
index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
shuffle=True, seed=None, capacity=32)
index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
nrof_preprocess_threads = 4
input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
dtypes=[tf.string, tf.int32, tf.int32],
shapes=[(1,), (1,), (1,)],
shared_name=None, name=None)
enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='enqueue_op')
image_batch, label_batch = facenet.create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
image_batch = tf.identity(image_batch, 'image_batch')
image_batch = tf.identity(image_batch, 'input')
label_batch = tf.identity(label_batch, 'label_batch')
print('Number of classes in training set: %d' % nrof_classes)
print('Number of examples in training set: %d' % len(image_list))
print('Number of classes in validation set: %d' % len(val_set))
print('Number of examples in validation set: %d' % len(val_image_list))
print('Building training graph')
prelogits, _ = network.inference(image_batch, args.keep_probability,
phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
weight_decay=args.weight_decay)
logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
weights_initializer=slim.initializers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(args.weight_decay),
scope='Logits', reuse=False)
embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
eps = 1e-4
prelogits_norm = tf.reduce_mean(tf.norm(tf.abs(prelogits)+eps, ord=args.prelogits_norm_p, axis=1))
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_norm * args.prelogits_norm_loss_factor)
prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_batch, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)), tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
train_op = facenet.train(total_loss, global_step, args.optimizer,
learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
summary_op = tf.summary.merge_all()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
with sess.as_default():
if pretrained_model:
print('Restoring pretrained model: %s' % pretrained_model)
ckpt = tf.train.get_checkpoint_state(pretrained_model)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print('Running training')
nrof_steps = args.max_nrof_epochs*args.epoch_size
nrof_val_samples = int(math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs))
stat = {
'loss': np.zeros((nrof_steps,), np.float32),
'center_loss': np.zeros((nrof_steps,), np.float32),
'reg_loss': np.zeros((nrof_steps,), np.float32),
'xent_loss': np.zeros((nrof_steps,), np.float32),
'prelogits_norm': np.zeros((nrof_steps,), np.float32),
'accuracy': np.zeros((nrof_steps,), np.float32),
'val_loss': np.zeros((nrof_val_samples,), np.float32),
'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
'lfw_valrate2': np.zeros((args.max_nrof_epochs,), np.float32),
'lfw_valrate3': np.zeros((args.max_nrof_epochs,), np.float32),
'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
}
for epoch in range(1,args.max_nrof_epochs+1):
step = sess.run(global_step, feed_dict=None)
t = time.time()
cont = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, global_step,
total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
stat, cross_entropy_mean, accuracy, learning_rate,
prelogits, prelogits_center_loss, args.random_rotate, args.random_crop, args.random_flip, prelogits_norm, args.prelogits_hist_max, args.use_fixed_image_standardization)
stat['time_train'][epoch-1] = time.time() - t
if not cont:
break
t = time.time()
if len(val_image_list)>0 and ((epoch-1) % args.validate_every_n_epochs == args.validate_every_n_epochs-1 or epoch==args.max_nrof_epochs):
validate(args, sess, epoch, val_image_list, val_label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
phase_train_placeholder, batch_size_placeholder,
stat, total_loss, regularization_losses, cross_entropy_mean, accuracy, args.validate_every_n_epochs, args.use_fixed_image_standardization)
stat['time_validate'][epoch-1] = time.time() - t
save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, epoch)
# Evaluate on LFW
t = time.time()
if args.lfw_dir:
evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer, stat, epoch,
args.lfw_distance_metric, args.lfw_subtract_mean, args.lfw_use_flipped_images, args.use_fixed_image_standardization)
stat['time_evaluate'][epoch-1] = time.time() - t
print('Saving statistics')
with h5py.File(stat_file_name, 'w') as f:
for key, value in stat.items():
f.create_dataset(key, data=value)
return model_dir
def find_threshold(var, percentile):
hist, bin_edges = np.histogram(var, 100)
cdf = np.float32(np.cumsum(hist)) / np.sum(hist)
bin_centers = (bin_edges[:-1]+bin_edges[1:])/2
#plt.plot(bin_centers, cdf)
threshold = np.interp(percentile*0.01, cdf, bin_centers)
return threshold
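
# A minimal, self-contained sketch (illustration only, not part of the training
# pipeline above) of how find_threshold() maps a percentile to a distance cutoff
# through the histogram CDF. The sampled distances below are made up.
def _find_threshold_example():
    example_distances = np.random.uniform(0.0, 2.0, size=1000)
    cutoff = find_threshold(example_distances, percentile=90)
    # Roughly 90% of the sampled distances lie below the returned cutoff.
    return cutoff
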
def filter_dataset(dataset, data_filename, percentile, min_nrof_images_per_class):
with h5py.File(data_filename,'r') as f:
distance_to_center = np.array(f.get('distance_to_center'))
label_list = np.array(f.get('label_list'))
image_list = np.array(f.get('image_list'))
distance_to_center_threshold = find_threshold(distance_to_center, percentile)
indices = np.where(distance_to_center>=distance_to_center_threshold)[0]
filtered_dataset = dataset
removelist = []
for i in indices:
label = label_list[i]
image = image_list[i]
if image in filtered_dataset[label].image_paths:
filtered_dataset[label].image_paths.remove(image)
if len(filtered_dataset[label].image_paths)<min_nrof_images_per_class:
removelist.append(label)
ix = sorted(list(set(removelist)), reverse=True)
for i in ix:
del(filtered_dataset[i])
return filtered_dataset
def train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, step,
loss, train_op, summary_op, summary_writer, reg_losses, learning_rate_schedule_file,
stat, cross_entropy_mean, accuracy,
learning_rate, prelogits, prelogits_center_loss, random_rotate, random_crop, random_flip, prelogits_norm, prelogits_hist_max, use_fixed_image_standardization):
batch_number = 0
if args.learning_rate>0.0:
lr = args.learning_rate
else:
lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
if lr<=0:
return False
index_epoch = sess.run(index_dequeue_op)
label_epoch = np.array(label_list)[index_epoch]
image_epoch = np.array(image_list)[index_epoch]
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.array(label_epoch),1)
image_paths_array = np.expand_dims(np.array(image_epoch),1)
control_value = facenet.RANDOM_ROTATE * random_rotate + facenet.RANDOM_CROP * random_crop + facenet.RANDOM_FLIP * random_flip + facenet.FIXED_STANDARDIZATION * use_fixed_image_standardization
control_array = np.ones_like(labels_array) * control_value
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
# Training loop
train_time = 0
while batch_number < args.epoch_size:
start_time = time.time()
feed_dict = {learning_rate_placeholder: lr, phase_train_placeholder:True, batch_size_placeholder:args.batch_size}
tensor_list = [loss, train_op, step, reg_losses, prelogits, cross_entropy_mean, learning_rate, prelogits_norm, accuracy, prelogits_center_loss]
if batch_number % 100 == 0:
loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_, summary_str = sess.run(tensor_list + [summary_op], feed_dict=feed_dict)
summary_writer.add_summary(summary_str, global_step=step_)
else:
loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_ = sess.run(tensor_list, feed_dict=feed_dict)
duration = time.time() - start_time
stat['loss'][step_-1] = loss_
stat['center_loss'][step_-1] = center_loss_
stat['reg_loss'][step_-1] = np.sum(reg_losses_)
stat['xent_loss'][step_-1] = cross_entropy_mean_
stat['prelogits_norm'][step_-1] = prelogits_norm_
stat['learning_rate'][epoch-1] = lr_
stat['accuracy'][step_-1] = accuracy_
stat['prelogits_hist'][epoch-1,:] += np.histogram(np.minimum(np.abs(prelogits_), prelogits_hist_max), bins=1000, range=(0.0, prelogits_hist_max))[0]
duration = time.time() - start_time
print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tXent %2.3f\tRegLoss %2.3f\tAccuracy %2.3f\tLr %2.5f\tCl %2.3f' %
(epoch, batch_number+1, args.epoch_size, duration, loss_, cross_entropy_mean_, np.sum(reg_losses_), accuracy_, lr_, center_loss_))
batch_number += 1
train_time += duration
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/total', simple_value=train_time)
summary_writer.add_summary(summary, global_step=step_)
return True
def validate(args, sess, epoch, image_list, label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
phase_train_placeholder, batch_size_placeholder,
stat, loss, regularization_losses, cross_entropy_mean, accuracy, validate_every_n_epochs, use_fixed_image_standardization):
print('Running forward pass on validation set')
nrof_batches = len(label_list) // args.lfw_batch_size
nrof_images = nrof_batches * args.lfw_batch_size
# Enqueue one epoch of image paths and labels
labels_array = np.expand_dims(np.array(label_list[:nrof_images]),1)
image_paths_array = np.expand_dims(np.array(image_list[:nrof_images]),1)
control_array = np.ones_like(labels_array, np.int32)*facenet.FIXED_STANDARDIZATION * use_fixed_image_standardization
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
loss_array = np.zeros((nrof_batches,), np.float32)
xent_array = np.zeros((nrof_batches,), np.float32)
accuracy_array = np.zeros((nrof_batches,), np.float32)
    # Validation loop
start_time = time.time()
for i in range(nrof_batches):
feed_dict = {phase_train_placeholder:False, batch_size_placeholder:args.lfw_batch_size}
loss_, cross_entropy_mean_, accuracy_ = sess.run([loss, cross_entropy_mean, accuracy], feed_dict=feed_dict)
loss_array[i], xent_array[i], accuracy_array[i] = (loss_, cross_entropy_mean_, accuracy_)
if i % 10 == 9:
print('.', end='')
sys.stdout.flush()
print('')
duration = time.time() - start_time
val_index = (epoch-1)//validate_every_n_epochs
stat['val_loss'][val_index] = np.mean(loss_array)
stat['val_xent_loss'][val_index] = np.mean(xent_array)
stat['val_accuracy'][val_index] = np.mean(accuracy_array)
print('Validation Epoch: %d\tTime %.3f\tLoss %2.3f\tXent %2.3f\tAccuracy %2.3f' %
(epoch, duration, np.mean(loss_array), np.mean(xent_array), np.mean(accuracy_array)))
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds, log_dir, step, summary_writer, stat, epoch, distance_metric, subtract_mean, use_flipped_images, use_fixed_image_standardization):
start_time = time.time()
# Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')
# Enqueue one epoch of image paths and labels
nrof_embeddings = len(actual_issame)*2 # nrof_pairs * nrof_images_per_pair
nrof_flips = 2 if use_flipped_images else 1
nrof_images = nrof_embeddings * nrof_flips
labels_array = np.expand_dims(np.arange(0,nrof_images),1)
image_paths_array = np.expand_dims(np.repeat(np.array(image_paths),nrof_flips),1)
control_array = np.zeros_like(labels_array, np.int32)
if use_fixed_image_standardization:
control_array += np.ones_like(labels_array)*facenet.FIXED_STANDARDIZATION
if use_flipped_images:
# Flip every second image
control_array += (labels_array % 2)*facenet.FLIP
sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
embedding_size = int(embeddings.get_shape()[1])
assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
nrof_batches = nrof_images // batch_size
emb_array = np.zeros((nrof_images, embedding_size))
lab_array = np.zeros((nrof_images,))
for i in range(nrof_batches):
feed_dict = {phase_train_placeholder:False, batch_size_placeholder:batch_size}
emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
lab_array[lab] = lab
emb_array[lab, :] = emb
if i % 10 == 9:
print('.', end='')
sys.stdout.flush()
print('')
embeddings = np.zeros((nrof_embeddings, embedding_size*nrof_flips))
if use_flipped_images:
# Concatenate embeddings for flipped and non flipped version of the images
embeddings[:,:embedding_size] = emb_array[0::2,:]
embeddings[:,embedding_size:] = emb_array[1::2,:]
else:
embeddings = emb_array
assert np.array_equal(lab_array, np.arange(nrof_images))==True, 'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
_, _, accuracy, val2, val_std2, far2, val3, val_std3, far3 = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean)
print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val2, val_std2, far2))
print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val3, val_std3, far3))
lfw_time = time.time() - start_time
# Add validation loss and accuracy to summary
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
summary.value.add(tag='lfw/val_rate2', simple_value=val2)
summary.value.add(tag='lfw/val_rate3', simple_value=val3)
summary.value.add(tag='time/lfw', simple_value=lfw_time)
summary_writer.add_summary(summary, step)
with open(os.path.join(log_dir,'lfw_result.txt'),'at') as f:
f.write('%d\t%.5f\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val2, val3))
stat['lfw_accuracy'][epoch-1] = np.mean(accuracy)
stat['lfw_valrate2'][epoch-1] = val2
stat['lfw_valrate3'][epoch-1] = val3
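
# Small numpy sketch (illustration only) of the flipped-image embedding layout used
# in evaluate() above: with use_flipped_images, even rows of emb_array hold the
# original images and odd rows hold their horizontal flips, so the [0::2] and [1::2]
# slices concatenate the two embeddings of each image along the feature axis.
def _flipped_embedding_layout_example():
    emb_array = np.arange(12).reshape(6, 2)   # 3 images x 2 versions, embedding_size=2
    combined = np.zeros((3, 4))
    combined[:, :2] = emb_array[0::2, :]      # embeddings of the original images
    combined[:, 2:] = emb_array[1::2, :]      # embeddings of the flipped images
    return combined
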
def save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):
# Save the model checkpoint
print('Saving variables')
start_time = time.time()
checkpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)
saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
save_time_variables = time.time() - start_time
print('Variables saved in %.2f seconds' % save_time_variables)
metagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)
save_time_metagraph = 0
if not os.path.exists(metagraph_filename):
print('Saving metagraph')
start_time = time.time()
saver.export_meta_graph(metagraph_filename)
save_time_metagraph = time.time() - start_time
print('Metagraph saved in %.2f seconds' % save_time_metagraph)
summary = tf.Summary()
#pylint: disable=maybe-no-member
summary.value.add(tag='time/save_variables', simple_value=save_time_variables)
summary.value.add(tag='time/save_metagraph', simple_value=save_time_metagraph)
summary_writer.add_summary(summary, step)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.', default='~/logs/facenet')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.', default='~/models/facenet')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches.',
default='~/datasets/casia/casia_maxpy_mtcnnalign_182_160')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module containing the definition of the inference graph.', default='models.inception_resnet_v1')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=500)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=90)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=3860)
parser.add_argument('--embedding_size', type=int,
help='Dimensionality of the embedding.', default=128)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
        'If the size of the images in the data directory is equal to image_size, no cropping is performed', action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--random_rotate',
help='Performs random rotations of training images.', action='store_true')
parser.add_argument('--use_fixed_image_standardization',
help='Performs fixed standardization of images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=0.0)
parser.add_argument('--center_loss_factor', type=float,
help='Center loss factor.', default=0.0)
parser.add_argument('--center_loss_alfa', type=float,
help='Center update rate for center loss.', default=0.95)
parser.add_argument('--prelogits_norm_loss_factor', type=float,
help='Loss based on the norm of the activations in the prelogits layer.', default=0.0)
parser.add_argument('--prelogits_norm_p', type=float,
help='Norm to use for prelogits norm loss.', default=1.0)
parser.add_argument('--prelogits_hist_max', type=float,
help='The max value for the prelogits histogram.', default=10.0)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=100)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=1.0)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--nrof_preprocess_threads', type=int,
help='Number of preprocessing (data loading and augmentation) threads.', default=4)
parser.add_argument('--log_histograms',
help='Enables logging of weight/bias histograms in tensorboard.', action='store_true')
parser.add_argument('--learning_rate_schedule_file', type=str,
        help='File containing the learning rate schedule that is used when learning_rate is set to -1.', default='data/learning_rate_schedule.txt')
parser.add_argument('--filter_filename', type=str,
help='File containing image data used for dataset filtering', default='')
parser.add_argument('--filter_percentile', type=float,
        help='Keep only the percentile of images closest to their class center', default=100.0)
parser.add_argument('--filter_min_nrof_images_per_class', type=int,
help='Keep only the classes with this number of examples or more', default=0)
parser.add_argument('--validate_every_n_epochs', type=int,
        help='Number of epochs between validation', default=5)
parser.add_argument('--validation_set_split_ratio', type=float,
help='The ratio of the total dataset to use for validation', default=0.0)
parser.add_argument('--min_nrof_val_images_per_class', type=float,
help='Classes with fewer images will be removed from the validation set', default=0)
# Parameters for validation on LFW
parser.add_argument('--lfw_pairs', type=str,
help='The file containing the pairs to use for validation.', default='data/pairs.txt')
parser.add_argument('--lfw_dir', type=str,
help='Path to the data directory containing aligned face patches.', default='')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
parser.add_argument('--lfw_distance_metric', type=int,
        help='Type of distance metric to use. 0: Euclidean, 1: Cosine similarity distance.', default=0)
parser.add_argument('--lfw_use_flipped_images',
help='Concatenates embeddings for the image and its horizontally flipped counterpart.', action='store_true')
parser.add_argument('--lfw_subtract_mean',
help='Subtract feature mean before calculating distance.', action='store_true')
return parser.parse_args(argv)
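
# Example invocation (the script filename and the LFW directory below are
# placeholders, not values defined in this file; the remaining values are the
# defaults documented above):
#
#   python train_softmax.py \
#       --data_dir ~/datasets/casia/casia_maxpy_mtcnnalign_182_160 \
#       --model_def models.inception_resnet_v1 \
#       --image_size 160 --batch_size 90 \
#       --learning_rate -1 \
#       --learning_rate_schedule_file data/learning_rate_schedule.txt \
#       --lfw_dir ~/datasets/lfw/lfw_mtcnnalign_160
#
# Setting --learning_rate to a negative value makes the per-epoch learning rate
# come from the schedule file instead (see the --learning_rate help text above).
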
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| true | true |
7906f563c0009ac37695f50c9dc2b035b8f004aa | 174,992 | py | Python |
python/paddle/fluid/layers/detection.py | Jeffrey28/Paddle | 6b70e05e9345ee7907005b3840430edacdb15095 | ["Apache-2.0"] | 3 | 2021-06-11T06:48:10.000Z | 2021-09-02T10:18:06.000Z |
python/paddle/fluid/layers/detection.py | 92lqllearning/Paddle | d11c140e280880b9d031fa38361f3230aef6cf9c | ["Apache-2.0"] | null | null | null |
python/paddle/fluid/layers/detection.py | 92lqllearning/Paddle | d11c140e280880b9d031fa38361f3230aef6cf9c | ["Apache-2.0"] | 1 | 2020-11-05T08:41:11.000Z | 2020-11-05T08:41:11.000Z |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
    The searching principles for positive and negative samples are as follows:
    1. An anchor is assigned to a ground-truth box when it has the highest IoU
    overlap with that ground-truth box.
    2. An anchor is assigned to a ground-truth box when it has an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.
    3. An anchor is assigned to background when its IoU overlap is lower than
    :attr:`negative_overlap` for all ground-truth boxes.
    4. Anchors which do not meet any of the above conditions do not participate in
    the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0, and its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
        gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
            of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
            number of negative samples, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
            represents whether a positive sample is a fake positive; if a positive
            sample is a fake positive, the corresponding entries in
            :attr:`bbox_inside_weight` are set to 0, otherwise to 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
    Given the Intersection-over-Union (IoU) overlap between anchors and ground
    truth boxes, this layer assigns classification and regression targets to
    each anchor; these targets are used to train the RPN. The classification
    target is a binary class label (object or not object). Following the
    Faster-RCNN paper, positive labels are assigned to two kinds of anchors:
    (i) the anchor/anchors with the highest IoU overlap with a ground-truth box,
    or (ii) an anchor that has an IoU overlap higher than
    rpn_positive_overlap(0.7) with any ground-truth box. Note that a single
    ground-truth box may assign positive labels to multiple anchors. An anchor
    is labeled negative when its IoU ratio is lower than rpn_negative_overlap
    (0.3) for all ground-truth boxes. Anchors that are neither positive nor
    negative do not contribute to the training objective. The regression
    targets are the encoded ground-truth boxes associated with the positive
    anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
            foreground and background sigmoid, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth box is a crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
examples. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
        and predicted_location are the predicted results of the RPN.
        The target_label and target_bbox are the ground truth,
respectively. The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is same as the shape of
the predicted_location, F is the number of the foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is same as the shape
of the predicted_scores, B is the number of the background
        anchors; F and B depend on the input of this operator.
Bbox_inside_weight represents whether the predicted loc is fake_fg
or not and the shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='float32')
im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
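
# Small sketch (illustration only, independent of the op above) of the anchor
# labeling rule described in the rpn_target_assign docstring: an anchor is positive
# when its IoU with some ground-truth box exceeds rpn_positive_overlap, negative
# when its best IoU is below rpn_negative_overlap, and ignored otherwise. The extra
# rule that the highest-overlap anchor of each ground-truth box is also positive
# needs all anchors at once and is omitted here.
def _rpn_label_rule_example(anchor, gt_boxes, pos_thresh=0.7, neg_thresh=0.3):
    # anchor and every entry of gt_boxes are [xmin, ymin, xmax, ymax] lists.
    def iou(a, b):
        iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
        ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
        inter = iw * ih
        union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
        return inter / union
    best = max(iou(anchor, gt) for gt in gt_boxes)
    if best > pos_thresh:
        return 1    # positive sample
    if best < neg_thresh:
        return 0    # negative sample
    return -1       # neither; ignored during training
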
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists in the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
    The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; Samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
                    # Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground,
# find the elements in label which are greater or equal than 1, then
                # compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
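
# Plain-numpy restatement (illustration only, not used by the op above) of the
# focal loss formula given in the sigmoid_focal_loss docstring, with x of shape
# [N, C] and label of shape [N, 1] holding 0 for background and 1..C otherwise.
def _sigmoid_focal_loss_reference(x, label, fg_num, gamma=2.0, alpha=0.25):
    sig = 1.0 / (1.0 + np.exp(-x))
    n, c = x.shape
    # positive[i, j] is True when column j corresponds to the sample's label (j + 1).
    positive = (np.arange(1, c + 1)[None, :] == label.reshape(n, 1))
    pos_term = -alpha * (1.0 - sig) ** gamma * np.log(sig)
    neg_term = -(1.0 - alpha) * sig ** gamma * np.log(1.0 - sig)
    return np.where(positive, pos_term, neg_term) / fg_num
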
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
        Out (Variable): The detection output is a LoDTensor with shape [No, 6].
            Data type is the same as input (loc). Each row has six values:
            [label, confidence, xmin, ymin, xmax, ymax]. `No` is
            the total number of detections in this mini-batch. For each instance,
            the offsets in the first dimension are called LoD, the offset number is
            N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
            detected results, if it is 0, the i-th image has no detected results.
        Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
            with shape [No, 1] represents the selected index whose type is Integer.
            The index is the absolute value across batches. No is the same number
            as Out. If the index is used to gather other attributes such as age,
            one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
            N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
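
# Small sketch (illustration only) of how the LoD described in the detection_output
# docstring maps rows of Out to images: for a batch of N images the LoD holds N + 1
# offsets, and image i owns rows lod[i]:lod[i + 1] of the [No, 6] output, each row
# being [label, confidence, xmin, ymin, xmax, ymax]. The numbers below are made up.
def _split_detections_by_lod_example():
    lod = [0, 3, 5]                           # two images with 3 and 2 detections
    out = np.zeros((5, 6), dtype='float32')   # placeholder detection rows
    return [out[lod[i]:lod[i + 1]] for i in range(len(lod) - 1)]
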
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}.The data type is float32 or float64.
y (Variable): ${y_comment}.The data type is float32 or float64.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
Set true by default.
Returns:
Variable: ${out_comment}.The data type is same with x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(\abs(tw / pw)) / pwv
oh = \log(\abs(th / ph)) / phv
The Decoding schema described below:
.. math::
        ox = (pw * pxv * tx + px) - tw / 2
        oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is variable with shape [M, 4] which holds M group and
data type is float32 or float64. The second is list consist of
4 elements shared by all boxes and data type is float32 or float64.
Other is None and not involved in calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
Set true by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
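    # prior_box_var may be given either as a Variable holding per-box variances
    # or as a plain Python list of 4 floats shared by all boxes; the former is
    # wired in as an op input, the latter as the 'variance' attribute.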
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment}The data type is float32 or float64.
        gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
                          in the third dimension, x, y, w, h should be stored.
                          x, y is the center coordinate of boxes, w, h are the
                          width and height, x, y, w, h should be divided by
                          input image height to scale to [0, 1].
                          N is the batch number and B is the max box number in
                          an image. The data type is float32 or float64.
        gt_label (Variable): class id of ground truth boxes, should be in shape
                          of [N, B]. The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
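    # Besides the loss, the op also produces an objectness mask and a
    # ground-truth match mask; only the loss is returned to the caller.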
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
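    # GTScore (mixup score of the ground-truth boxes) is optional and is only
    # passed to the op when provided.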
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
        TypeError: Input x of yolo_box must be Variable
        TypeError: Attr anchors of yolo_box must be list or tuple
        TypeError: Attr class_num of yolo_box must be an integer
        TypeError: Attr conf_thresh of yolo_box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
        input_states: (tuple|None) If not None, it contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
            from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
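    # When input_states/out_states are given, the op accumulates positive
    # counts, true positives and false positives across mini-batches, so mAP
    # can be evaluated over more than a single batch.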
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
    distance matrix. For an input 2D matrix, the bipartite matching algorithm
    can find the matched column for each row (matched means the largest
    distance), and can also find the matched row for each column. This
    operator only calculates matched indices from column to row. For each
    instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
    In short, this algorithm matches the best (maximum distance) row entity
    to each column entity, and the matched indices are not duplicated in each
    row of ColToRowMatchIndices. If a column entity is not matched to any row
    entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
    NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
    layer. Please consider using :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
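    # ColToRowMatchIndices holds, for every column, the matched row index
    # (or -1 when the column is unmatched); ColToRowMatchDist holds the
    # corresponding distances.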
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
    Given the target bounding boxes or labels, this operator assigns
    classification and regression targets to each prediction, as well as
    weights for each prediction. The weights are used to specify which
    predictions do not contribute to the training loss.
    For each instance, the output `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assumed that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
            out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assumed that i-th instance in `neg_indices` is called `neg_indice`,
for i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
            is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
the j-th entity of column is not matched to any entity of row in
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
        out (Variable): a 3D Tensor with shape [N, P, K] and the same data type
            as `input`. N and P are the same as they are in `matched_indices`,
            and K is the same as in the input of X.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
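    # Mismatched locations (match index -1) are filled with mismatch_value and
    # get zero weight; entries listed in negative_indices, when given, are
    # re-weighted to 1 (see the docstring above).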
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
        location (Variable): The location predictions are a 3D Tensor with
            shape [N, Np, 4], N is the batch size, Np is total number of
            predictions for each instance. 4 is the number of coordinate values,
            the layout is [xmin, ymin, xmax, ymax]. The data type is float32 or
            float64.
        confidence (Variable): The confidence predictions are a 3D Tensor
            with shape [N, Np, C], N and Np are the same as they are in
            `location`, C is the class number. The data type is float32 or
            float64.
        gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input. The data type is float32 or float64.
        gt_label (Variable): The ground-truth labels are a 2D LoDTensor
            with shape [Ng, 1]. Ng is the total number of ground-truth bboxes of
            mini-batch input, and 1 is the number of classes. The data type is
            float32 or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative', now only support `max_negative`.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1], N and Np are the same as they are in
`location`.The data type is float32 or float64.
Raises:
        ValueError: If mining_type is 'hard_example'; currently only the
            mining type `max_negative` is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
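    # conf_shape captures the runtime shape of `confidence`; its leading two
    # dims are used later to restore the [N, Np] layout of the flattened
    # confidence loss.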
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
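    # mine_hard_examples picks the background priors with the largest
    # confidence loss (keeping at most neg_pos_ratio negatives per positive)
    # and updates the match indices accordingly.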
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
    Each position of the input produces N prior boxes, where N is determined
    by the count of min_sizes, max_sizes and aspect_ratios. The size of each
    box lies in the (min_size, max_size) interval, and the boxes are generated
    in sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       steps(list|tuple): Prior boxes step across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the prior box step across
            the width or height of the input will be automatically calculated.
            Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weight order of the
            convolution layer that follows, and does not affect the final
            detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
            4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input
num_priors is the total box count of each position of input
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
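    # max_sizes is optional; it is only forwarded to the op when at least one
    # positive value is supplied.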
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
algorithm. Each position of the input produce N prior boxes, N is
determined by the count of densities, fixed_sizes and fixed_ratios.
    Boxes centered at grid points around each input position are generated by
    this operator; the grid points are determined by densities, and the count
    of density prior boxes is determined by fixed_sizes and fixed_ratios.
Obviously, the number of fixed_sizes is equal to the number of densities.
For densities_i in densities:
.. math::
        N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
Parameters:
        input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
the layout is NCHW.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should a list or tuple of same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fix_sizes` is set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
        steps(list|tuple): Prior boxes step across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the density prior box step
            across the width or height of the input will be automatically
            calculated. Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple ',
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
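    # Each input position generates
    # sum(len(fixed_ratios) * density**2 for density in densities) prior boxes
    # (see the formula in the docstring above).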
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
    Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior
    boxes, regression locations and classification confidences on multiple
    input feature maps, then output the concatenated results. For the details
    of this algorithm, please refer to section 2.2 of the SSD paper
    `SSD: Single Shot MultiBox Detector <https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
        base_size(int): the base_size is the input image size. When
            len(inputs) > 2 and `min_size` and `max_size` are None, the
            `min_size` and `max_size` are calculated from `base_size`,
            `min_ratio` and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
        min_sizes(list|tuple|None): If `len(inputs) <= 2`,
            min_sizes must be set up, and the length of min_sizes
            should equal the length of inputs. Default: None.
        max_sizes(list|tuple|None): If `len(inputs) <= 2`,
            max_sizes must be set up, and the length of max_sizes
            should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height, If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
        flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
        stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weight order of the
            convolution layer that follows, and does not affect the final
            detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
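    # For every input feature map below, priors are generated and a pair of
    # conv2d heads predicts box locations and per-class confidences; the
    # per-layer results are concatenated at the end.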
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
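        # num_boxes is the number of priors per spatial position on this
        # feature map.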
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
    Each position of the input produces N anchors, where N =
    size(anchor_sizes) * size(aspect_ratios). The generated anchors iterate
    over aspect_ratios first and then over anchor_sizes.
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals to 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
            Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_priors, 4].
H is the height of input, W is the width of input
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple ',
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
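    # Each feature-map position yields len(anchor_sizes) * len(aspect_ratios)
    # anchors, iterating over aspect ratios first and then over sizes.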
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
    ROI perspective transform op applies a perspective transform to map each roi into a
    rectangular region. A perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
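    # Out2InIdx and Out2InWeights are auxiliary op outputs and are not
    # returned; only out, mask and the transform matrix are exposed.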
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
    Given the bounding boxes produced by GenerateProposalOp and the groundtruth, this operator
    samples foreground and background boxes and computes the loss targets.
    RpnRois are the output boxes of the RPN, processed by generate_proposal_op. These boxes
    are combined with the groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
    If an instance has a groundtruth overlap greater than fg_thresh, it is considered a foreground sample.
    If an instance has a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
    it is considered a background sample.
    After all foreground and background boxes are chosen (the so-called RoIs),
    random sampling is applied to make sure
    the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
    For each box in RoIs, we assign the classification (class label) and regression targets (box label) to it.
    Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): Whether bbox regression is class agnostic, i.e. only foreground and background boxes are represented.
        is_cascade_rcnn(bool): When set to True, bboxes crossing the image boundary are filtered out.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
    Given the RoIs and their corresponding labels, this operator samples
    foreground RoIs. The mask branch also has
    a :math:`K \\times M^{2}` dimensional output target for each foreground
    RoI, which encodes K binary masks of resolution M x M, one for each of the
    K classes. These mask targets are used to compute the loss of the mask branch.
    Please note the data format of the ground-truth segmentation, which is assumed
    to be as follows: the first instance has two gt objects, and
    the second instance has one gt object, which has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
            float32 data type, its LoD level is 3.
            Usually users do not need to understand LoD,
            but they should return the correct data format in the reader.
The LoD[0] represents the ground-truth objects number of
each instance. LoD[1] represents the segmentation counts of each
objects. LoD[2] represents the polygons number of each segmentation.
S the total number of polygons coordinate points. Each element is
(x, y) coordinate points.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data type.
            R is the total number of RoIs, each element is a bounding
            box with (xmin, ymin, xmax, ymax) format in the range of the original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposal Faster-RCNN**
    This operation proposes RoIs according to each box's probability
    of being a foreground object; the boxes are computed from the anchors.
    bbox_deltas and scores are the outputs of RPN. The final proposals
    can be used to train the detection net.
For generating proposals, this operation performs following steps:
    1. Transposes and reshapes scores and bbox_deltas to shapes of
       (H*W*A, 1) and (H*W*A, 4) respectively.
2. Calculate box locations as proposals candidates.
3. Clip boxes to image
4. Remove predicted boxes with small area.
5. Apply NMS to get final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are height and width of the feature map,
num_anchors is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): When set to True, a 1-D Tensor with shape [N, ] containing
            the number of RoIs of each image in the batch is also returned, where N is the number of images.
            For example, the value [4, 5] means the first image has 4 RoIs and the second image has 5 RoIs.
            It is only used in RCNN models. 'False' by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the box into the size given by im_info
For each input box, The formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
        im_w = round(width / scale)
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
        im_info = fluid.data(name='im_info', shape=[-1, 3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions, this OP is to get the detection results by
performing following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
        score_threshold(float): Threshold used to filter out bounding boxes
            with a confidence score lower than this value before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
    **Notice**: In some cases where the image sizes are very small, it's possible
    that there is no detection if :attr:`score_threshold` is used at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS. And the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': 1.,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes whose scores are larger than score_threshold, if this
    threshold is provided, and then keeps the largest nms_top_k confidence scores if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have a high IoU
    (intersection over union) overlap with already selected boxes by adaptive
    threshold NMS based on the parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k of the total bboxes are kept
    per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.score = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, 4 or 8 16 24 32] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
                           are in total M scores which correspond to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4].The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Local Aware NMS**
    `Local Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
    suppression (LANMS) on boxes and scores.
    Firstly, this operator merges boxes and scores according to their IoU
    (intersection over union). In the NMS step, this operator greedily selects a
    subset of detection bounding boxes whose scores are larger than score_threshold,
    if this threshold is provided, and then keeps the largest nms_top_k confidence scores
    if nms_top_k is larger than -1. Then this operator prunes away boxes that have a high
    IoU overlap with already selected boxes by adaptive threshold NMS based on the parameters
    nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k of the total bboxes are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
                           boxes. Currently only 1 class is supported. For each category
                           there are in total M scores which correspond to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
    This operator does matrix non maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then the top k candidates are selected if
    nms_top_k is larger than -1. Scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k of the total bboxes are kept
    per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of the proposals, the referring scale and the
    referring level. Besides, to restore the order of the proposals, we return an
    array which indicates the original index of each roi in the current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
and data type of float32 and float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
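    # Assigns classification and regression targets to anchors for RetinaNet
    # training: anchors whose overlap with a ground-truth box exceeds
    # positive_overlap are treated as positives, those below negative_overlap
    # as negatives. Returns the predictions gathered at the sampled locations
    # together with their targets, the bbox inside-weights and the foreground count.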
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
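    # Samples anchors and assigns classification/regression targets for RPN
    # training, then gathers the corresponding cls_logits and bbox_pred so the
    # returned predictions and targets are aligned one-to-one.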
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
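    # Computes the sigmoid focal loss (RetinaNet) on logits x against integer
    # labels, normalized by the number of foreground samples fg_num.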
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
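    # Decodes the predicted locations with the prior boxes, applies softmax to
    # the scores and runs multi-class NMS to produce the final detection results
    # (optionally also returning the kept indices).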
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
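    # Computes the IoU (intersection over union) similarity matrix between the
    # boxes in x and the boxes in y.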
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
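    # Encodes or decodes target boxes with the prior boxes and their variances,
    # according to code_type ('encode_center_size' or 'decode_center_size').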
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
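    # Transforms per-pixel polygon box offset predictions into absolute vertex
    # coordinates of the predicted quadrangles.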
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
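    # Computes the YOLOv3 training loss for feature map x against the
    # ground-truth boxes, labels and optional mixup scores, using the given
    # anchors and anchor_mask for this detection layer.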
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
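# Illustrative usage sketch (added comment): decode a raw YOLOv3 head output into
# boxes and per-class scores; 'anchors' lists (w, h) pairs for this feature map and
# 'head_out' / 'im_shape' below are placeholder Variables.
#   boxes, scores = yolo_box(x=head_out, img_size=im_shape,
#                            anchors=[116, 90, 156, 198, 373, 326],
#                            class_num=80, conf_thresh=0.01, downsample_ratio=32)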
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
iou = iou_similarity(x=gt_box, y=prior_box)
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
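# Note (added comment): ssd_loss above follows the standard SSD training pipeline:
# 1) match priors to ground truth (bipartite_match), 2) mine hard negatives
# (mine_hard_examples), 3) encode regression targets (box_coder + target_assign),
# 4) combine the weighted confidence and localization losses, optionally normalized
# by the sum of the localization target weights.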
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple '
                         'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
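# Illustrative usage sketch (added comment): generate prior (anchor) boxes for one
# feature map; 'conv_feat' and 'im' below are placeholder Variables.
#   box, var = prior_box(input=conv_feat, image=im, min_sizes=[100.0],
#                        aspect_ratios=[1.0, 2.0], flip=True, clip=True)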
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple '
                         'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
        step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'step_w should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple '
                         'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': 1.,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
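# Illustrative usage sketch (added comment): per-class NMS over decoded boxes;
# 'boxes' and 'cls_scores' below are placeholder Variables (typically [N, M, 4]
# boxes and [N, C, M] scores).
#   out = multiclass_nms(bboxes=boxes, scores=cls_scores, score_threshold=0.05,
#                        nms_top_k=400, keep_top_k=200, nms_threshold=0.45)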
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
    assert shape[1] == 1, \
        "locality_aware_nms only supports one class; Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
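# Note (added comment): distribute_fpn_proposals assigns each RoI to an FPN level
# based on its scale relative to refer_scale/refer_level; restore_ind can later be
# used with a gather to put the per-level results back into the original RoI order.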
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
| true
| true
|
7906f631304b4282f2f80e05dfd5cc90e50ef925
| 95,139
|
py
|
Python
|
jp.atcoder/abc009/abc009_4/17183548.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc009/abc009_4/17183548.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc009/abc009_4/17183548.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import itertools
import math
import string
import sys
from bisect import bisect_left as bi_l
from bisect import bisect_right as bi_r
from collections import Counter, defaultdict, deque
from heapq import heappop, heappush
from operator import or_, xor
inf = float("inf")
from functools import lru_cache, reduce
sys.setrecursionlimit(10**6)
MOD = 10**9 + 7
# MOD = 998244353
global using_numpy
using_numpy = False
import networkx as nx
import numpy as np
from numba import jit
from scipy import optimize
from scipy.ndimage import distance_transform_cdt
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import (
csgraph_to_dense,
maximum_flow,
minimum_spanning_tree,
shortest_path,
)
from scipy.spatial import ConvexHull
from scipy.special import comb
class Algebra:
class Mint(int):
def __init__(self, n, mod=MOD):
self.value = n
self.mod = mod
def __str__(self):
return f"{self.value}"
def __add__(self, x):
return self.__class__((self.value + x.value) % self.mod)
def __sub__(self, x):
return self.__class__((self.value - x.value) % self.mod)
def __mul__(self, x):
return self.__class__((self.value * x.value) % self.mod)
def __pow__(self, x):
return self.__class__(pow(self.value, x.value, self.mod))
def __lt__(self, x):
return self.value < x.value
def __le__(self, x):
return self.value <= x.value
def __eq__(self, x):
return self.value == x.value
def __ne__(self, x):
return self.value != x.value
def __gt__(self, x):
return self.value > x.value
def __ge__(self, x):
return self.value >= x.value
class SemiGroup:
pass
class Monoid:
pass
class Group:
pass
class SemiRing:
pass
class Ring:
pass
@staticmethod
def identity(n):
if using_numpy:
return np.identity(n, dtype=np.int64)
else:
a = [[0] * n for _ in range(n)]
for i in range(n):
a[i][i] = 1
return a
@staticmethod
def dot(a, b):
if using_numpy:
return np.dot(a, b)
else:
assert len(a[0]) == len(b)
c = [[0] * len(b[0]) for _ in range(len(a))]
for i in range(len(a)):
for j in range(len(b[0])):
for k in range(len(b)):
c[i][j] += a[i][k] * b[k][j]
return c
@classmethod
def matrix_pow(cls, a, n, mod=10**9 + 7):
m = len(a)
b = cls.identity(m)
while n:
if n & 1:
b = cls.dot(b, a)
n >>= 1
a = cls.dot(a, a)
if using_numpy:
a %= mod
b %= mod
else:
for i in range(m):
for j in range(m):
a[i][j] %= mod
b[i][j] %= mod
return b
@staticmethod
def bitwise_dot(a, b):
if using_numpy:
return np.bitwise_xor.reduce(
a[:, None, :] & b.T[None, :, :], axis=-1
)
else:
assert len(a[0]) == len(b)
c = [[0] * len(b[0]) for _ in range(len(a))]
for i in range(len(a)):
for j in range(len(b[0])):
for k in range(len(b)):
c[i][j] ^= a[i][k] & b[k][j]
return c
@classmethod
def bitwise_mat_pow(cls, a, n):
if n == 0:
return np.eye(len(a), dtype=np.uint32) * ((1 << 32) - 1)
res = cls.bitwise_mat_pow(a, n // 2)
res = cls.bitwise_dot(res, res)
return cls.bitwise_dot(res, a) if n & 1 else res
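# Illustrative check (added comment, not part of the submission): matrix_pow can
# evaluate linear recurrences, e.g. Fibonacci numbers modulo MOD:
#   Algebra.matrix_pow([[1, 1], [1, 0]], 10)[0][1]  # -> 55 (= F(10))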
class NumberTheory:
def __init__(self, n=2 * 10**6):
self.n = n
self.is_prime_number, self.prime_numbers = self.sieve_of_eratosthenes(
n
)
def sieve_of_eratosthenes(self, n):
if using_numpy:
sieve = np.ones(n + 1, dtype=np.int32)
sieve[:2] = 0
for i in range(2, int(n**0.5) + 1):
if sieve[i]:
sieve[i * 2 :: i] = 0
prime_numbers = np.flatnonzero(sieve)
else:
sieve = [1] * (n + 1)
sieve[0] = sieve[1] = 0
for i in range(2, int(n**0.5) + 1):
if not sieve[i]:
continue
for j in range(i * 2, n + 1, i):
sieve[j] = 0
prime_numbers = [i for i in range(2, n + 1) if sieve[i]]
return sieve, prime_numbers
def prime_factorize(self, n):
res = dict()
if n < 2:
return res
border = int(n**0.5)
for p in self.prime_numbers:
if p > border:
break
while n % p == 0:
res[p] = res.get(p, 0) + 1
n //= p
if n == 1:
return res
res[n] = 1
return res
def prime_factorize_factorial(self, n):
res = dict()
for i in range(2, n + 1):
for p, c in self.prime_factorize(i).items():
res[p] = res.get(p, 0) + c
return res
@classmethod
@lru_cache(maxsize=None)
def gcd(cls, a, b):
return cls.gcd(b, a % b) if b else abs(a)
@classmethod
def lcm(cls, a, b):
return abs(a // cls.gcd(a, b) * b)
@staticmethod
def find_divisors(n):
divisors = []
for i in range(1, int(n**0.5) + 1):
if n % i:
continue
divisors.append(i)
j = n // i
if j != i:
divisors.append(j)
return sorted(divisors)
@staticmethod
def base_convert(n, b):
if not n:
return [0]
res = []
while n:
n, r = divmod(n, b)
if r < 0:
n += 1
r -= b
res.append(r)
return res
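# Illustrative checks (added comment): with the default sieve size,
#   NumberTheory().prime_factorize(360)  # -> {2: 3, 3: 2, 5: 1}
#   NumberTheory.gcd(12, 18)             # -> 6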
mint = Algebra.Mint
class Combinatorics:
def __init__(self, N=10**9, n=10**6, mod=10**9 + 7):
self.mod = mod
self.make_mod_tables(N, n)
@classmethod
@lru_cache(maxsize=None)
def choose(cls, n, r, mod=None): # no mod, or mod ≠ prime
if r > n or r < 0:
return 0
if r == 0:
return 1
res = cls.choose(n - 1, r, mod) + cls.choose(n - 1, r - 1, mod)
if mod:
res %= mod
return res
def cumprod(self, a):
p = self.mod
l = len(a)
sql = int(np.sqrt(l) + 1)
a = np.resize(a, sql**2).reshape(sql, sql)
for i in range(sql - 1):
a[:, i + 1] *= a[:, i]
a[:, i + 1] %= p
for i in range(sql - 1):
a[i + 1] *= a[i, -1]
a[i + 1] %= p
return np.ravel(a)[:l]
def make_mod_tables(self, N, n):
p = self.mod
if using_numpy:
fac = np.arange(n + 1)
fac[0] = 1
fac = self.cumprod(fac)
ifac = np.arange(n + 1, 0, -1)
ifac[0] = pow(int(fac[-1]), p - 2, p)
ifac = self.cumprod(ifac)[n::-1]
n_choose = np.arange(N + 1, N - n, -1)
n_choose[0] = 1
n_choose[1:] = self.cumprod(n_choose[1:]) * ifac[1 : n + 1] % p
else:
fac = [None] * (n + 1)
fac[0] = 1
for i in range(n):
fac[i + 1] = fac[i] * (i + 1) % p
ifac = [None] * (n + 1)
ifac[n] = pow(fac[n], p - 2, p)
for i in range(n, 0, -1):
ifac[i - 1] = ifac[i] * i % p
n_choose = [None] * (n + 1)
n_choose[0] = 1
for i in range(n):
n_choose[i + 1] = n_choose[i] * (N - i) % p
for i in range(n + 1):
n_choose[i] = n_choose[i] * ifac[i] % p
self.fac, self.ifac, self.mod_n_choose = fac, ifac, n_choose
def mod_choose(self, n, r):
p = self.mod
return self.fac[n] * self.ifac[r] % p * self.ifac[n - r] % p
@classmethod
def permutations(cls, a, r=None, i=0):
a = list(a)
n = len(a)
if r is None:
r = n
res = []
if r > n or i > r:
return res
if i == r:
return [tuple(a[:r])]
for j in range(i, n):
a[i], a[j] = a[j], a[i]
res += cls.permutations(a, r, i + 1)
return res
@staticmethod
def combinations(a, r):
a = tuple(a)
n = len(a)
if r > n:
return
indices = list(range(r))
yield a[:r]
while True:
for i in range(r - 1, -1, -1):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield tuple(a[i] for i in indices)
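# Illustrative check (added comment): the memoised Pascal recurrence gives
#   Combinatorics.choose(5, 2)  # -> 10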
class String:
@staticmethod
def z_algorithm(s):
n = len(s)
a = [0] * n
a[0] = n
l = r = -1
for i in range(1, n):
if r >= i:
a[i] = min(a[i - l], r - i)
while i + a[i] < n and s[i + a[i]] == s[a[i]]:
a[i] += 1
if i + a[i] >= r:
l, r = i, i + a[i]
return a
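# Illustrative check (added comment): z_algorithm returns, for each position i, the
# length of the longest common prefix of s[i:] with s itself:
#   String.z_algorithm("aabaa")  # -> [5, 1, 0, 2, 1]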
class GeometryTopology:
class Graph:
def __init__(self, nodes={}, edges={}):
self.nodes = nodes
self.edges = edges
def add_node(self, v, **info):
            if v not in self.edges:
self.edges[v] = {}
if v in self.nodes:
return
self.nodes[v] = info
def add_edge(self, u, v, **info):
self.add_node(u)
self.add_node(v)
self.edges[u][v] = info
def get_size(self):
return len(self.nodes)
def dinic(self, src, sink):
def bfs():
lv = {src: 0}
q = deque([src])
while q:
u = q.popleft()
for v, e in self.edges[u].items():
if e["capacity"] == 0 or v in lv:
continue
lv[v] = lv[u] + 1
q.append(v)
return lv
def flow_to_sink(u, flow_in):
if u == sink:
return flow_in
flow = 0
for v, e in self.edges[u].items():
cap = e["capacity"]
if cap == 0 or lv[v] <= lv[u]:
continue
f = flow_to_sink(v, min(flow_in, cap))
if not f:
continue
self.edges[u][v]["capacity"] -= f
if v in self.edges and u in self.edges[v]:
self.edges[v][u]["capacity"] += f
else:
self.add_edge(v, u, capacity=f)
flow_in -= f
flow += f
return flow
flow = 0
while True:
lv = bfs()
                if sink not in lv:
return flow
flow += flow_to_sink(src, inf)
def ford_fulkerson(self):
pass
def push_relabel(self):
pass
def floyd_warshall(self):
d = {u: {v: inf for v in self.nodes} for u in self.nodes}
for v in self.nodes:
d[v][v] = 0
for u in self.edges:
for v in self.edges[u]:
d[u][v] = self.edges[u][v]["weight"]
for w in self.nodes:
for u in self.nodes:
for v in self.nodes:
d[u][v] = min(d[u][v], d[u][w] + d[w][v])
return d
def dijkstra(self, src, paths_cnt=False, mod=None):
dist = {v: inf for v in self.nodes}
dist[src] = 0
visited = set()
paths = {v: 0 for v in self.nodes}
paths[src] = 1
q = [(0, src)]
while q:
d, u = heappop(q)
if u in visited:
continue
visited.add(u)
for v, e in self.edges[u].items():
dv = d + e["weight"]
if dv > dist[v]:
continue
elif dv == dist[v]:
paths[v] += paths[u]
if mod:
paths[v] %= mod
continue
paths[v] = paths[u]
dist[v] = dv
heappush(q, (dv, v))
if paths_cnt:
return dist, paths
else:
return dist
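        # Illustrative usage (added comment): build a weighted digraph and query
        # single-source shortest paths, e.g.
        #   g = GeometryTopology.Graph(nodes={}, edges={})
        #   g.add_edge(0, 1, weight=3); g.add_edge(1, 2, weight=4)
        #   g.dijkstra(0)  # -> {0: 0, 1: 3, 2: 7}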
def astar(self, src, tgt, heuristic_func):
cost = {v: inf for v in self.nodes}
q = [(heuristic_func(src, tgt), 0, src)]
while q:
s, c, u = heappop(q)
if u == tgt:
return c
if cost[u] != inf:
continue
cost[u] = c
for v, e in self.edges[u].items():
if cost[v] != inf:
continue
h = heuristic_func(v, tgt)
nc = c + e["weight"]
heappush(q, (h + nc, nc, v))
return inf
def init_tree(self, root=0):
self.depth = {root: 0}
self.dist = {root: 0}
self.ancestors = [{root: root}]
stack = [root]
while stack:
u = stack.pop()
for v, e in self.edges[u].items():
if v == self.ancestors[0][u]:
continue
self.dist[v] = self.dist[u] + e["weight"]
self.depth[v] = self.depth[u] + 1
self.ancestors[0][v] = u
stack.append(v)
# tree doubling
for _ in range(max(self.depth).bit_length()):
ancestor = self.ancestors[-1]
nxt_ancestor = {v: ancestor[ancestor[v]] for v in self.nodes}
self.ancestors.append(nxt_ancestor)
def find_dist(self, u, v):
return (
self.dist[u]
+ self.dist[v]
- 2 * self.dist[self.find_lca(u, v)]
)
def find_lca(self, u, v):
du, dv = self.depth[u], self.depth[v]
if du > dv:
u, v = v, u
du, dv = dv, du
d = dv - du
for i in range((d).bit_length()): # up-stream
if d >> i & 1:
v = self.ancestors[i][v]
if v == u:
return v
for i in range(
du.bit_length() - 1, -1, -1
): # find direct child of LCA.
nu, nv = self.ancestors[i][u], self.ancestors[i][v]
if nu == nv:
continue
u, v = nu, nv
return self.ancestors[0][u]
@staticmethod
def triangle_area(p0, p1, p2, signed=False):
x1, y1, x2, y2 = (
p1[0] - p0[0],
p1[1] - p0[1],
p2[0] - p0[0],
p2[1] - p0[1],
)
return (
(x1 * y2 - x2 * y1) / 2 if signed else abs(x1 * y2 - x2 * y1) / 2
)
@classmethod
def intersect(cls, seg1, seg2):
(p1, p2), (p3, p4) = seg1, seg2
t1 = cls.triangle_area(p1, p2, p3, signed=True)
t2 = cls.triangle_area(p1, p2, p4, signed=True)
t3 = cls.triangle_area(p3, p4, p1, signed=True)
t4 = cls.triangle_area(p3, p4, p2, signed=True)
return (t1 * t2 < 0) & (t3 * t4 < 0)
class UnionFind:
def __init__(self, n=10**6):
self.root = list(range(n))
self.height = [0] * n
self.size = [1] * n
def find_root(self, u):
if self.root[u] == u:
return u
self.root[u] = self.find_root(self.root[u])
return self.root[u]
def unite(self, u, v):
ru = self.find_root(u)
rv = self.find_root(v)
if ru == rv:
return
hu = self.height[ru]
hv = self.height[rv]
if hu >= hv:
self.root[rv] = ru
self.size[ru] += self.size[rv]
self.height[ru] = max(hu, hv + 1)
else:
self.root[ru] = rv
self.size[rv] += self.size[ru]
def cumxor(a):
return reduce(xor, a, 0)
def cumor(a):
return reduce(or_, a, 0)
def bit_count(n):
cnt = 0
while n:
cnt += n & 1
n >>= 1
return cnt
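# Illustrative checks (added comment) for the small helpers above:
#   bit_count(13)      # -> 3 (0b1101 has three set bits)
#   cumxor([1, 2, 3])  # -> 0
#   cumor([1, 2, 4])   # -> 7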
class AtCoder:
class ABC001:
@staticmethod
def a():
h1, h2 = map(int, sys.stdin.read().split())
print(h1 - h2)
@staticmethod
def d():
def to_minuites(x):
q, r = divmod(x, 100)
return 60 * q + r
def to_hmform(x):
q, r = divmod(x, 60)
return 100 * q + r
n = int(sys.stdin.readline().rstrip())
term = [0] * 2001
for _ in range(n):
s, e = map(
to_minuites,
map(int, sys.stdin.readline().rstrip().split("-")),
)
s = s // 5 * 5
e = (e + 4) // 5 * 5
term[s] += 1
term[e + 1] -= 1
for i in range(2000):
term[i + 1] += term[i]
res = []
raining = False
for i in range(2001):
if term[i]:
if not raining:
s = i
raining = True
elif raining:
res.append((s, i - 1))
raining = False
for s, e in res:
print(f"{to_hmform(s):04}-{to_hmform(e):04}")
class ABC002:
@staticmethod
def a():
print(max(map(int, sys.stdin.readline().split())))
@staticmethod
def b():
vowels = set("aeiou")
print(
"".join(
[
c
for c in sys.stdin.readline().rstrip()
if c not in vowels
]
)
)
@staticmethod
def c():
print(
GeometryTopology.triangle_area(
*map(int, sys.stdin.readline().split())
)
)
@staticmethod
def d():
n, m = map(int, sys.stdin.readline().split())
edges = set(
(x - 1, y - 1)
for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)
)
print(
max(
len(s)
for i in range(1, 1 << n)
for s in [[j for j in range(n) if i >> j & 1]]
if all(
(x, y) in edges
for x, y in itertools.combinations(s, 2)
)
)
)
@staticmethod
def d_2():
n, m = map(int, sys.stdin.readline().split())
relations = [1 << i for i in range(n)]
for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):
x -= 1
y -= 1
relations[x] |= 1 << y
relations[y] |= 1 << x
res = 0
for i in range(1 << n):
cnt = 0
s = 0
t = (1 << n) - 1
for j in range(n):
if i >> j & 1:
s |= 1 << j
t &= relations[j]
cnt += 1
if t & s == s:
res = max(res, cnt)
print(res)
class ABC003:
@staticmethod
def a():
print((int(sys.stdin.readline().rstrip()) + 1) * 5000)
@staticmethod
def b():
atcoder = set("atcoder")
s, t = sys.stdin.read().split()
print(
all(
s[i] == t[i]
or s[i] == "@"
and t[i] in atcoder
or t[i] == "@"
and s[i] in atcoder
for i in range(len(s))
)
and "You can win"
or "You will lose"
)
@staticmethod
def c():
n, k, *r = map(int, sys.stdin.read().split())
print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))
class ABC004:
@staticmethod
def a():
print(int(sys.stdin.readline().rstrip()) * 2)
@staticmethod
def b():
for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:
print(l[::-1])
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip()) % 30
res = list(range(1, 7))
for i in range(n):
i %= 5
res[i], res[i + 1] = res[i + 1], res[i]
print(*res, sep="")
class ABC005:
@staticmethod
def a():
x, y = map(int, sys.stdin.readline().split())
print(y // x)
@staticmethod
def b():
n, *t = map(int, sys.stdin.read().split())
print(min(t))
@staticmethod
def c():
t = int(sys.stdin.readline().rstrip())
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
m = int(sys.stdin.readline().rstrip())
b = [int(x) for x in sys.stdin.readline().split()]
i = 0
for p in b:
if i == n:
print("no")
return
while p - a[i] > t:
i += 1
if i == n:
print("no")
return
if a[i] > p:
print("no")
return
i += 1
print("yes")
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
d = np.array(
[sys.stdin.readline().split() for _ in range(n)], np.int64
)
s = d.cumsum(axis=0).cumsum(axis=1)
s = np.pad(s, 1)
max_del = np.zeros((n + 1, n + 1), dtype=np.int64)
for y in range(1, n + 1):
for x in range(1, n + 1):
max_del[y, x] = np.amax(
s[y : n + 1, x : n + 1]
- s[0 : n - y + 1, x : n + 1]
- s[y : n + 1, 0 : n - x + 1]
+ s[0 : n - y + 1, 0 : n - x + 1]
)
res = np.arange(n**2 + 1)[:, None]
i = np.arange(1, n + 1)
res = max_del[i, np.minimum(res // i, n)].max(axis=1)
q = int(sys.stdin.readline().rstrip())
p = np.array(sys.stdin.read().split(), dtype=np.int64)
print(*res[p], sep="\n")
class ABC006:
@staticmethod
def a():
n = sys.stdin.readline().rstrip()
if "3" in n:
print("YES")
elif int(n) % 3 == 0:
print("YES")
else:
print("NO")
@staticmethod
def b():
mod = 10007
a = np.eye(N=3, k=-1, dtype=np.int64)
a[0] = 1
n = int(sys.stdin.readline().rstrip())
a = Algebra.matrix_pow(a, n - 1, mod)
print(a[2][0])
@staticmethod
def c():
n, m = map(int, sys.stdin.readline().split())
cnt = [0, 0, 0]
if m == 1:
cnt = [-1, -1, -1]
else:
if m & 1:
m -= 3
cnt[1] += 1
n -= 1
cnt[2] = m // 2 - n
cnt[0] = n - cnt[2]
if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:
print(-1, -1, -1)
else:
print(*cnt, sep=" ")
@staticmethod
def d():
n, *c = map(int, sys.stdin.read().split())
lis = [inf] * n
for x in c:
lis[bi_l(lis, x)] = x
print(n - bi_l(lis, inf))
class ABC007:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print(n - 1)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
if s == "a":
print(-1)
else:
print("a")
@staticmethod
def c():
r, c = map(int, sys.stdin.readline().split())
sy, sx = map(int, sys.stdin.readline().split())
gy, gx = map(int, sys.stdin.readline().split())
sy -= 1
sx -= 1
gy -= 1
gx -= 1
maze = [sys.stdin.readline().rstrip() for _ in range(r)]
queue = deque([(sy, sx)])
dist = np.full((r, c), np.inf)
dist[sy, sx] = 0
while queue:
y, x = queue.popleft()
for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
i += y
j += x
if maze[i][j] == "#" or dist[i, j] != np.inf:
continue
dist[i, j] = dist[y, x] + 1
queue.append((i, j))
print(int(dist[gy, gx]))
@staticmethod
def d():
ng = set([4, 9])
def count(d):
return d if d <= 4 else d - 1
def f(n):
x = [int(d) for d in str(n)]
flg = True
dp = 0
for d in x:
dp = dp * 8 + flg * count(d)
if d in ng:
flg = False
return n - (dp + flg)
a, b = map(int, sys.stdin.readline().split())
print(f(b) - f(a - 1))
class ABC008:
@staticmethod
def a():
s, t = map(int, sys.stdin.readline().split())
print(t - s + 1)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
res = defaultdict(int)
for name in s:
res[name] += 1
print(sorted(res.items(), key=lambda x: x[1])[-1][0])
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
c = n - np.count_nonzero(a[:, None] % a, axis=1)
print(np.sum((c + 1) // 2 / c))
@staticmethod
def d():
w, h, n, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*([iter(xy)] * 2))
@lru_cache(maxsize=None)
def count(x1, y1, x2, y2):
res = 0
for x, y in xy:
if not (x1 <= x <= x2 and y1 <= y <= y2):
continue
cnt = (x2 - x1) + (y2 - y1) + 1
cnt += count(x1, y1, x - 1, y - 1)
cnt += count(x1, y + 1, x - 1, y2)
cnt += count(x + 1, y1, x2, y - 1)
cnt += count(x + 1, y + 1, x2, y2)
res = max(res, cnt)
return res
print(count(1, 1, w, h))
class ABC009:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print((n + 1) // 2)
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
print(sorted(set(a))[-2])
@staticmethod
def c():
n, k = map(int, sys.stdin.readline().split())
s = list(sys.stdin.readline().rstrip())
cost = [1] * n
r = k
for i in range(n - 1):
q = []
for j in range(i + 1, n):
if s[j] < s[i] and cost[i] + cost[j] <= r:
heappush(q, (s[j], cost[i] + cost[j], -j))
if not q:
continue
_, c, j = heappop(q)
j = -j
s[i], s[j] = s[j], s[i]
r -= c
cost[i] = cost[j] = 0
print("".join(s))
@staticmethod
def d():
k, m = map(int, sys.stdin.readline().split())
a = np.array([int(x) for x in sys.stdin.readline().split()])
c = np.array([int(x) for x in sys.stdin.readline().split()])
mask = (1 << 32) - 1
d = np.eye(k, k, -1, dtype=np.uint32) * mask
d[0] = c
if m <= k:
print(a[m - 1])
return
# print(Algebra.bitwise_mat_pow(d, m-k))
# print(Algebra.bitwise_dot(Algebra.bitwise_mat_pow(d, m-k), a[::-1].reshape(-1, 1))[0].item())
print(
Algebra.bitwise_dot(
Algebra.bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1)
)[0][0]
)
class ABC010:
@staticmethod
def a():
print(sys.stdin.readline().rstrip() + "pp")
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
tot = 0
for x in a:
c = 0
while x % 2 == 0 or x % 3 == 2:
x -= 1
c += 1
tot += c
print(tot)
@staticmethod
def c():
sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())
x, y = np.array(xy).reshape(-1, 2).T
def dist(x1, y1, x2, y2):
return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
ans = (
"YES"
if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()
else "NO"
)
print(ans)
@staticmethod
def d():
n, g, e = map(int, sys.stdin.readline().split())
p = [int(x) for x in sys.stdin.readline().split()]
x, y = [], []
for _ in range(e):
a, b = map(int, sys.stdin.readline().split())
x.append(a)
y.append(b)
x.append(b)
y.append(a)
for a in p:
x.append(a)
y.append(n)
if not x:
print(0)
return
c = [1] * len(x)
min_cut = maximum_flow(
csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n
).flow_value
print(min_cut)
@staticmethod
def d_2():
n, g, e = map(int, sys.stdin.readline().split())
graph = nx.DiGraph()
graph.add_nodes_from(range(n + 1))
for p in [int(x) for x in sys.stdin.readline().split()]:
graph.add_edge(p, n, capacity=1)
for _ in range(e):
a, b = map(int, sys.stdin.readline().split())
graph.add_edge(a, b, capacity=1)
graph.add_edge(b, a, capacity=1)
print(nx.minimum_cut_value(graph, 0, n))
@staticmethod
def d_3():
n, g, e = map(int, sys.stdin.readline().split())
graph = GeometryTopology.Graph()
for i in range(n + 1):
graph.add_node(i)
for p in [int(x) for x in sys.stdin.readline().split()]:
graph.add_edge(p, n, capacity=1)
for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):
graph.add_edge(a, b, capacity=1)
graph.add_edge(b, a, capacity=1)
print(graph.dinic(0, n))
class ABC011:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print(n % 12 + 1)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
print(s[0].upper() + s[1:].lower())
@staticmethod
def c():
n, *ng = map(int, sys.stdin.read().split())
ng = set(ng)
if n in ng:
print("NO")
else:
r = 100
while n > 0:
if r == 0:
print("NO")
return
for i in range(3, 0, -1):
if (n - i) in ng:
continue
n -= i
r -= 1
break
else:
print("NO")
return
print("YES")
@staticmethod
def d():
n, d, x, y = map(int, sys.stdin.read().split())
x, y = abs(x), abs(y)
if x % d or y % d:
print(0)
return
x, y = x // d, y // d
r = n - (x + y)
if r < 0 or r & 1:
print(0)
return
res = 0
half_p = pow(1 / 2, n)
for d in range(r // 2 + 1): # 0 <= d <= r//2, south
south, north = d, y + d
west = (r - 2 * d) // 2
res += (
half_p
* comb(n, south, exact=True)
* comb(n - south, north, exact=True)
* comb(n - south - north, west, exact=True)
* half_p
)
print(res)
class ABC012:
@staticmethod
def a():
a, b = map(int, sys.stdin.readline().split())
print(b, a)
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
h, n = divmod(n, 3600)
m, s = divmod(n, 60)
print(f"{h:02}:{m:02}:{s:02}")
@staticmethod
def c():
n = 2025 - int(sys.stdin.readline().rstrip())
res = []
for i in range(1, 10):
if n % i != 0 or n // i > 9:
continue
res.append(f"{i} x {n//i}")
print(*sorted(res), sep="\n")
@staticmethod
def d():
n, m, *abt = map(int, sys.stdin.read().split())
a, b, t = np.array(abt).reshape(m, 3).T
res = shortest_path(
csr_matrix((t, (a - 1, b - 1)), (n, n)),
method="FW",
directed=False,
)
print(res.max(axis=-1).min().astype(np.int64))
@staticmethod
def d_2():
n, m, *abt = map(int, sys.stdin.read().split())
graph = GeometryTopology.Graph()
for a, b, t in zip(*[iter(abt)] * 3):
a -= 1
b -= 1
graph.add_edge(a, b, weight=t)
graph.add_edge(b, a, weight=t)
dist = graph.floyd_warshall()
res = min([max(tmp.values()) for tmp in dist.values()])
print(res)
class ABC013:
@staticmethod
def a():
print(ord(sys.stdin.readline().rstrip()) - ord("A") + 1)
@staticmethod
def b():
a, b = map(int, sys.stdin.read().split())
d = abs(a - b)
print(min(d, 10 - d))
@staticmethod
def c():
n, h, a, b, c, d, e = map(int, sys.stdin.read().split())
y = np.arange(n + 1)
x = (n * e - h - (d + e) * y) // (b + e) + 1
np.maximum(x, 0, out=x)
np.minimum(x, n - y, out=x)
print(np.amin(a * x + c * y))
@staticmethod
def d():
n, m, d, *a = map(int, sys.stdin.read().split())
res = list(range(n))
def swap(i, j):
res[i], res[j] = res[j], res[i]
for i in a[::-1]:
swap(i - 1, i)
group = [None] * n
root = [None] * n
index_in_group = [None] * n
for i in range(n):
if root[i] is not None:
continue
group[i] = []
j = i
for cnt in range(1, n + 1):
index_in_group[j] = cnt - 1
group[i].append(j)
j = res[j]
root[j] = i
if j == i:
break
for i in range(n):
g = group[root[i]]
print(g[(index_in_group[i] + d) % len(g)] + 1)
class ABC014:
@staticmethod
def a():
a, b = map(int, sys.stdin.read().split())
print((a + b - 1) // b * b - a)
@staticmethod
def b():
n, x, *a = map(int, sys.stdin.read().split())
print(sum(a[i] for i in range(n) if x >> i & 1))
@staticmethod
def c():
n, *ab = map(int, sys.stdin.read().split())
a, b = np.array(ab).reshape(n, 2).T
res = np.zeros(10**6 + 2, dtype=np.int64)
np.add.at(res, a, 1)
np.subtract.at(res, b + 1, 1)
np.cumsum(res, out=res)
print(res.max())
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
# edges = [[] for _ in range(n)]
g = GeometryTopology.Graph()
for _ in range(n - 1):
x, y = map(int, sys.stdin.readline().split())
x -= 1
y -= 1
g.add_edge(x, y, weight=1)
g.add_edge(y, x, weight=1)
g.init_tree()
# tree = GeometryTopology.TreeGraph(n, edges, 0)
q, *ab = map(int, sys.stdin.read().split())
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
print(g.find_dist(a, b) + 1)
class ABC015:
@staticmethod
def a():
a, b = sys.stdin.read().split()
print(a if len(a) > len(b) else b)
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
print(
np.ceil(
a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)
).astype(np.int8)
)
@staticmethod
def c():
n, k, *t = map(int, sys.stdin.read().split())
t = np.array(t).reshape(n, k)
x = np.zeros((1, 1), dtype=np.int8)
for i in range(n):
x = x.reshape(-1, 1) ^ t[i]
print("Found" if np.count_nonzero(x == 0) > 0 else "Nothing")
@staticmethod
def d():
w, n, k, *ab = map(int, sys.stdin.read().split())
dp = np.zeros((k + 1, w + 1), dtype=np.int32)
for a, b in zip(*[iter(ab)] * 2):
prev = dp.copy()
np.maximum(dp[1:, a:], prev[:-1, :-a] + b, out=dp[1:, a:])
print(dp[k][w])
class ABC016:
@staticmethod
def a():
m, d = map(int, sys.stdin.readline().split())
print("YES" if m % d == 0 else "NO")
@staticmethod
def b():
a, b, c = map(int, sys.stdin.readline().split())
f1, f2 = a + b == c, a - b == c
if f1 & f2:
print("?")
elif f1 & (~f2):
print("+")
elif (~f1) & f2:
print("-")
else:
print("!")
@staticmethod
def c():
n, _, *ab = map(int, sys.stdin.read().split())
friends = [0] * n
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
friends[a] |= 1 << b
friends[b] |= 1 << a
res = [
bit_count(
cumor(friends[j] for j in range(n) if friends[i] >> j & 1)
& ~(friends[i] | 1 << i)
)
for i in range(n)
]
print(*res, sep="\n")
@staticmethod
def d():
sx, sy, gx, gy = map(int, sys.stdin.readline().split())
seg1 = ((sx, sy), (gx, gy))
n = int(sys.stdin.readline().rstrip())
p1 = (
np.array(sys.stdin.read().split(), dtype=np.int64)
.reshape(n, 2)
.T
)
p2 = np.hstack((p1[:, 1:], p1[:, :1]))
seg2 = (p1, p2)
print(
np.count_nonzero(GeometryTopology.intersect(seg1, seg2)) // 2
+ 1
)
class ABC017:
@staticmethod
def a():
s, e = (
np.array(sys.stdin.read().split(), dtype=np.int16)
.reshape(3, 2)
.T
)
print((s // 10 * e).sum())
@staticmethod
def b():
choku_tail = set("ch, o, k, u".split(", "))
def is_choku(s):
if s == "":
return True
if len(s) >= 1 and (s[-1] in choku_tail) and is_choku(s[:-1]):
return True
if len(s) >= 2 and (s[-2:] in choku_tail) and is_choku(s[:-2]):
return True
return False
print("YES" if is_choku(sys.stdin.readline().rstrip()) else "NO")
@staticmethod
def c():
n, m, *lrs = map(int, sys.stdin.read().split())
l, r, s = np.array(lrs).reshape(n, 3).T
score = np.zeros((m + 1,), dtype=np.int32)
np.add.at(score, l - 1, s)
np.subtract.at(score, r, s)
np.cumsum(score, out=score)
print(s.sum() - score[:m].min())
@staticmethod
def d():
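            # sliding-window dp: dp[i] = sum of dp[j] over split points j >= prev[i] (last earlier occurrence of f[i]), kept as a running sum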
n, m, *f = map(int, sys.stdin.read().split())
prev = [0] * (n + 1)
tmp = defaultdict(int)
for i in range(n):
prev[i + 1] = tmp[f[i]]
tmp[f[i]] = i + 1
dp = [0] * (n + 1)
dp[0] = 1
l, s = 0, dp[0]
for i in range(1, n + 1):
while l < prev[i]:
s = (s - dp[l]) % MOD
l += 1
dp[i] = s
s = (s + dp[i]) % MOD
print(dp[n])
class ABC018:
@staticmethod
def a():
(*a,) = map(int, sys.stdin.read().split())
a = sorted(enumerate(a), key=lambda x: -x[1])
res = [None] * 3
for i in range(3):
res[a[i][0]] = i + 1
print(*res, sep="\n")
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
n, *lr = map(int, sys.stdin.read().split())
for l, r in zip(*[iter(lr)] * 2):
l -= 1
r -= 1
s = s[:l] + s[l : r + 1][::-1] + s[r + 1 :]
print(s)
@staticmethod
def c():
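            # separable L1 distance transform: row sweeps then column sweeps give each cell's taxicab distance to the nearest non-'o' cell; count cells with distance >= k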
r, c, k = map(int, sys.stdin.readline().split())
s = np.array([list(s) for s in sys.stdin.read().split()])
s = np.pad(s, 1, constant_values="x")
a = np.zeros_like(s, dtype=np.float64)
a[s == "o"] = np.inf
for i in range(1, r + 1):
np.minimum(a[i - 1, :] + 1, a[i, :], out=a[i, :])
for i in range(r, 0, -1):
np.minimum(a[i + 1, :] + 1, a[i, :], out=a[i, :])
for j in range(1, c + 1):
np.minimum(a[:, j - 1] + 1, a[:, j], out=a[:, j])
for j in range(c, 0, -1):
np.minimum(a[:, j + 1] + 1, a[:, j], out=a[:, j])
print(np.count_nonzero(a >= k))
@staticmethod
def c_2():
r, c, k = map(int, sys.stdin.readline().split())
s = np.array([list(s) for s in sys.stdin.read().split()])
s = np.pad(s, 1, constant_values="x")
a = (s == "o").astype(np.int16)
a = distance_transform_cdt(a, metric="taxicab")
print(np.count_nonzero(a >= k))
@staticmethod
def d():
n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())
x, y, z = np.array(xyz).reshape(r, 3).T
h = np.zeros((n, m), dtype=np.int32)
h[x - 1, y - 1] = z
g = np.array([*itertools.combinations(range(n), p)])
print(np.sort(h[g].sum(axis=1), axis=1)[:, -q:].sum(axis=1).max())
class ABC019:
@staticmethod
def a():
(*a,) = map(int, sys.stdin.readline().split())
print(sorted(a)[1])
@staticmethod
def b():
s = sys.stdin.readline().rstrip() + "$"
cnt = 0
prev = "$"
t = ""
for c in s:
if c == prev:
cnt += 1
continue
t += prev + str(cnt)
prev = c
cnt = 1
print(t[2:])
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
res = set()
for x in a:
while not x & 1:
x >>= 1
res.add(x)
print(len(res))
@staticmethod
def d():
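            # interactive tree diameter: the vertex farthest from vertex 1 is an endpoint of a diameter, so the answer is the largest distance from it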
def inquire(u, v):
print(f"? {u} {v}".format(u, v), flush=True)
return int(sys.stdin.readline().rstrip())
n = int(sys.stdin.readline().rstrip())
u = sorted([(inquire(1, v), v) for v in range(2, n + 1)])[-1][1]
d = max((inquire(u, v)) for v in range(1, n + 1) if u != v)
print(f"! {d}")
class ABC020:
@staticmethod
def a():
print(
"ABC"
if int(sys.stdin.readline().rstrip()) == 1
else "chokudai"
)
@staticmethod
def b():
a, b = sys.stdin.readline().split()
print(int(a + b) * 2)
@staticmethod
def c():
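            # binary search the largest block cost x that still allows reaching G within t; each feasibility check runs Dijkstra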
h, w, t = map(int, sys.stdin.readline().split())
s = [list(s) for s in sys.stdin.read().split()]
for i in range(h):
for j in range(w):
if s[i][j] == "S":
sy, sx = i, j
if s[i][j] == "G":
gy, gx = i, j
s[sy][sx] = s[gy][gx] = "."
source, target = (sy, sx), (gy, gx)
def heuristic_function(u, v=target):
                return abs(v[0] - u[0]) + abs(v[1] - u[1])
def min_time(x):
"""my lib"""
graph = GeometryTopology.Graph()
for i in range(h):
for j in range(w):
graph.add_node((i, j))
for i in range(h):
for j in range(w):
if i > 0:
graph.add_edge(
(i, j),
(i - 1, j),
weight=(1 if s[i - 1][j] == "." else x),
)
if i < h - 1:
graph.add_edge(
(i, j),
(i + 1, j),
weight=(1 if s[i + 1][j] == "." else x),
)
if j > 0:
graph.add_edge(
(i, j),
(i, j - 1),
weight=(1 if s[i][j - 1] == "." else x),
)
if j < w - 1:
graph.add_edge(
(i, j),
(i, j + 1),
weight=(1 if s[i][j + 1] == "." else x),
)
return graph.dijkstra(source)[target]
# return graph.astar(source, target, heuristic_function)
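                # the networkx variant below is kept for reference only; it is unreachable after the return above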
"""networkx"""
graph = nx.DiGraph()
for i in range(h):
for j in range(w):
if i > 0:
graph.add_edge(
(i, j),
(i - 1, j),
weight=(1 if s[i - 1][j] == "." else x),
)
if i < h - 1:
graph.add_edge(
(i, j),
(i + 1, j),
weight=(1 if s[i + 1][j] == "." else x),
)
if j > 0:
graph.add_edge(
(i, j),
(i, j - 1),
weight=(1 if s[i][j - 1] == "." else x),
)
if j < w - 1:
graph.add_edge(
(i, j),
(i, j + 1),
weight=(1 if s[i][j + 1] == "." else x),
)
return nx.dijkstra_path_length(graph, source, target)
return nx.astar_path_length(
graph, source, target, heuristic_function
)
def binary_search():
lo, hi = 1, t + 1
while lo + 1 < hi:
x = (lo + hi) // 2
if min_time(x) > t:
hi = x
else:
lo = x
return lo
print(binary_search())
@staticmethod
def d():
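            # sum of lcm(i, k) for i = 1..n: for each divisor d of k, isolate the i with gcd(i, k) = d by inclusion-exclusion, then scale by k // d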
n, k = map(int, sys.stdin.readline().split())
div = sorted(NumberTheory.find_divisors(k))
l = len(div)
s = [0] * l
for i, d in enumerate(div):
s[i] = (1 + n // d) * (n // d) // 2 * d % MOD
for i in range(l - 1, -1, -1):
for j in range(i + 1, l):
if div[j] % div[i]:
continue
s[i] = (s[i] - s[j]) % MOD
print(
sum(s[i] * k // div[i] % MOD for i in range(l)) % MOD
) # ans is LCM.
class ABC021:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
s = [1 << i for i in range(5) if n >> i & 1]
print(len(s), *s, sep="\n")
@staticmethod
def b():
n, a, b, k, *p = map(int, sys.stdin.read().split())
print("YES" if len(set(p) | set([a, b])) == k + 2 else "NO")
@staticmethod
def c():
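            # count shortest paths by repeatedly multiplying the path-count vector by the adjacency matrix (one BFS layer per product)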
n, a, b, m, *xy = map(int, sys.stdin.read().split())
x, y = np.array(xy).reshape(m, 2).T - 1
a -= 1
b -= 1
g = csgraph_to_dense(
csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8)
)
g = np.logical_or(g, g.T)
paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)
paths[a, 0] = 1
while not paths[b, 0]:
paths = np.dot(g, paths) % MOD
print(paths[b, 0])
@staticmethod
def c_2():
n, a, b, m, *xy = map(int, sys.stdin.read().split())
a -= 1
b -= 1
g = GeometryTopology.Graph()
for x, y in zip(*[iter(xy)] * 2):
x -= 1
y -= 1
g.add_edge(x, y, weight=1)
g.add_edge(y, x, weight=1)
dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)
print(paths[b])
@staticmethod
def d():
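            # combinations with repetition (stars and bars): C(n + k - 1, k)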
n, k = map(int, sys.stdin.read().split())
combinatorics = Combinatorics()
print(combinatorics.mod_choose(n + k - 1, k))
class ABC022:
@staticmethod
def a():
n, s, t, *a = map(int, sys.stdin.read().split())
a = np.array(a)
np.cumsum(a, out=a)
print(((s <= a) & (a <= t)).sum())
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
c = Counter(a)
print(sum(c.values()) - len(c))
@staticmethod
def c():
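            # shortest cycle through vertex 0: remove vertex 0 (zero entries mean "no edge" in the dense matrix), run Floyd-Warshall, then close the loop with the two edges incident to 0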
n, m, *uvl = map(int, sys.stdin.read().split())
u, v, l = np.array(uvl).reshape(m, 3).T
u -= 1
v -= 1
g = csgraph_to_dense(csr_matrix((l, (u, v)), (n, n)))
g += g.T
g[g == 0] = np.inf
dist0 = g[0].copy()
g[0] = 0
g[:, 0] = 0
dist = shortest_path(g, method="FW", directed=False)
u, v = np.array([*itertools.combinations(range(1, n), 2)]).T
res = (dist0[u] + dist[u, v] + dist0[v]).min()
print(-1 if res == np.inf else int(res))
@staticmethod
def d():
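            # the expansion ratio equals the ratio of total point-to-centroid distances of the two point sets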
n, *ab = map(int, sys.stdin.read().split())
c = np.array(ab).reshape(2, n, 2)
g = c.mean(axis=1)
d = np.sqrt(((c - g[:, None, :]) ** 2).sum(axis=-1)).sum(axis=1)
print(d[1] / d[0])
class ABC023:
@staticmethod
def a():
print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))
@staticmethod
def b():
n, s = sys.stdin.read().split()
n = int(n)
t = "b"
for i in range(n // 2):
if i % 3 == 0:
t = "a" + t + "c"
elif i % 3 == 1:
t = "c" + t + "a"
else:
t = "b" + t + "b"
print(n // 2 if t == s else -1)
@staticmethod
def b_2():
n, s = sys.stdin.read().split()
n = int(n)
if n & 1 ^ 1:
print(-1)
return
a = list("abc")
i = (1 - n // 2) % 3
for c in s:
if c != a[i]:
print(-1)
return
i = (i + 1) % 3
print(n // 2)
@staticmethod
def c():
h, w, k, n, *rc = map(int, sys.stdin.read().split())
r, c = np.array(rc).reshape(n, 2).T - 1
rb = np.bincount(r, minlength=h)
cb = np.bincount(c, minlength=w)
rbb = np.bincount(rb, minlength=k + 1)
cbb = np.bincount(cb, minlength=k + 1)
tot = (rbb[: k + 1] * cbb[k::-1]).sum()
real = np.bincount(rb[r] + cb[c] - 1, minlength=k + 1)
print(tot - real[k - 1] + real[k])
@staticmethod
def d():
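            # binary search the answer x: balloon i must be handled by time (x - h) // s, feasible iff the sorted deadlines dominate 0, 1, 2, ...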
n, *hs = map(int, sys.stdin.read().split())
h, s = np.array(hs).reshape(n, 2).T
t = np.arange(n)
def is_ok(x):
t_lim = (x - h) // s
t_lim.sort()
return np.all(t_lim >= t)
def binary_search():
lo, hi = 0, 10**14
while lo + 1 < hi:
x = (lo + hi) // 2
if is_ok(x):
hi = x
else:
lo = x
return hi
print(binary_search())
class ABC024:
@staticmethod
def a():
a, b, c, k, s, t = map(int, sys.stdin.read().split())
print(a * s + b * t - c * (s + t) * (s + t >= k))
@staticmethod
def b():
n, t, *a = map(int, sys.stdin.read().split())
a = np.array(a)
print(np.minimum(a[1:] - a[:-1], t).sum() + t)
@staticmethod
def c():
n, d, k, *lrst = map(int, sys.stdin.read().split())
lrst = np.array(lrst)
lr = lrst[: 2 * d].reshape(d, 2)
s, t = lrst[2 * d :].reshape(k, 2).T
day = np.zeros((k,), dtype=np.int32)
for i in range(d):
l, r = lr[i]
move = (l <= s) & (s <= r) & (s != t)
reach = move & (l <= t) & (t <= r)
s[move & (s < t)] = r
s[move & (s > t)] = l
s[reach] = t[reach]
day[reach] = i + 1
print(*day, sep="\n")
@staticmethod
def d():
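            # recover the grid height and width from the three path counts using modular inverses (MOD is prime)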
a, b, c = map(int, sys.stdin.read().split())
p = MOD
denom = pow(a * b % p - b * c % p + c * a % p, p - 2, p)
w = (b * c - a * b) % p * denom % p
h = (b * c - a * c) % p * denom % p
print(h, w)
class ABC025:
@staticmethod
def a():
s, n = sys.stdin.read().split()
n = int(n)
i, j = divmod(n - 1, 5)
print(s[i] + s[j])
@staticmethod
def b():
n, a, b = map(int, sys.stdin.readline().split())
res = defaultdict(int)
for _ in range(n):
s, d = sys.stdin.readline().split()
d = int(d)
res[s] += min(max(d, a), b)
res = res["East"] - res["West"]
if res == 0:
ans = 0
elif res > 0:
ans = f"East {res}"
else:
ans = f"West {-res}"
print(ans)
@staticmethod
def c():
b = [0] * 6
for i in range(2):
(*row,) = map(int, sys.stdin.readline().split())
for j in range(3):
b[i * 3 + j] = row[j]
c = [0] * 8
for i in range(3):
(*row,) = map(int, sys.stdin.readline().split())
for j in range(2):
c[i * 3 + j] = row[j]
tot = sum(b) + sum(c)
@lru_cache(maxsize=None)
def f(s=tuple(0 for _ in range(9))):
if all(s):
res = 0
for i in range(6):
res += (s[i] == s[i + 3]) * b[i]
for i in range(8):
res += (s[i] == s[i + 1]) * c[i]
return res
cand = [i for i in range(9) if not s[i]]
flg = len(cand) & 1
s = list(s)
res = []
for i in cand:
s[i] = (flg ^ 1) + 1
res.append(f(tuple(s)))
s[i] = 0
return sorted(res, reverse=flg)[0]
a = f()
b = tot - a
print(a)
print(b)
class ABC026:
@staticmethod
def a():
a = int(sys.stdin.readline().rstrip())
print(a // 2 * (a - a // 2))
@staticmethod
def b():
n, *r = map(int, sys.stdin.read().split())
s = np.pi * np.array([0] + r) ** 2
s.sort()
res = s[n::-2].sum() - s[n - 1 :: -2].sum()
print(res)
@staticmethod
def c():
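            # tree dp: an employee's salary is max(child salaries) + min(child salaries) + 1; leaves earn 1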
n, *b = map(int, sys.stdin.read().split())
g = GeometryTopology.Graph()
for i in range(1, n):
g.add_edge(b[i - 1] - 1, i, weight=1)
def f(u=0):
if not g.edges[u]:
return 1
s = [f(v) for v in g.edges[u]]
return max(s) + min(s) + 1
print(f())
@staticmethod
def d():
a, b, c = map(int, sys.stdin.readline().split())
def f(t):
return a * t + b * np.sin(c * t * np.pi) - 100
print(optimize.brenth(f, 0, 200))
class ABC027:
@staticmethod
def a():
l = [int(l) for l in sys.stdin.readline().split()]
l.sort()
print(l[2] if l[0] == l[1] else l[0])
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
m, r = divmod(sum(a), n)
if r:
print(-1)
return
population = 0
towns = 0
cnt = 0
for x in a:
population += x
towns += 1
if population / towns != m:
cnt += 1
continue
population, towns = 0, 0
print(cnt)
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip())
flg = n.bit_length() & 1 ^ 1
t = 0
x = 1
while x <= n:
t += 1
x = 2 * x + 1 if t & 1 ^ flg else 2 * x
print("Aoki" if t & 1 else "Takahashi")
class ABC032:
@staticmethod
def a():
a, b, n = map(int, sys.stdin.read().split())
l = NumberTheory.lcm(a, b)
print((n + l - 1) // l * l)
@staticmethod
def b():
s, k = sys.stdin.read().split()
k = int(k)
res = set()
for i in range(len(s) - k + 1):
res.add(s[i : i + k])
print(len(res))
@staticmethod
def c():
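            # two-pointer window: extend r while the running product stays <= k, shrink from l otherwise, tracking the longest window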
n, k, *s = map(int, sys.stdin.read().split())
if 0 in s:
print(n)
return
s += [inf]
res = 0
l = r = 0
tmp = 1
while r <= n:
tmp *= s[r]
while tmp > k:
res = max(res, r - l)
tmp //= s[l]
l += 1
r += 1
print(res)
class ABC033:
@staticmethod
def a():
n = set(sys.stdin.readline().rstrip())
print("SAME" if len(n) == 1 else "DIFFERENT")
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
res = dict()
for _ in range(n):
s, p = sys.stdin.readline().split()
p = int(p)
res[s] = p
tot = sum(res.values())
for s, p in res.items():
if p > tot / 2:
print(s)
return
print("atcoder")
@staticmethod
def c():
s = sys.stdin.readline().rstrip()
res = sum(not "0" in f for f in s.split("+"))
print(res)
class ABC034:
@staticmethod
def a():
x, y = map(int, sys.stdin.readline().split())
print("Better" if y > x else "Worse")
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
print(n + 1 if n & 1 else n - 1)
@staticmethod
def c():
h, w = map(int, sys.stdin.read().split())
combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)
print(combinatorics.mod_choose(h + w - 2, h - 1))
@staticmethod
def d():
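            # the best concentration x is the root of f(x) = sum of the k largest w * (p - x), found by bisection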
n, k, *wp = map(int, sys.stdin.read().split())
w, p = np.array(wp).reshape(-1, 2).T
def f(x):
return np.sort(w * (p - x))[-k:].sum()
print(optimize.bisect(f, 0, 100))
class ABC035:
@staticmethod
def a():
w, h = map(int, sys.stdin.readline().split())
print("4:3" if 4 * h == 3 * w else "16:9")
@staticmethod
def b():
s, t = sys.stdin.read().split()
y = 0
x = 0
z = 0
for c in s:
if c == "?":
z += 1
elif c == "L":
x -= 1
elif c == "R":
x += 1
elif c == "D":
y -= 1
elif c == "U":
y += 1
d = abs(y) + abs(x)
if t == "1":
print(d + z)
else:
print(max(d - z, (d - z) & 1))
@staticmethod
def c():
n, q, *lr = map(int, sys.stdin.read().split())
l, r = np.array(lr).reshape(q, 2).T
res = np.zeros(n + 1, dtype=int)
np.add.at(res, l - 1, 1)
np.subtract.at(res, r, 1)
np.cumsum(res, out=res)
res = res & 1
print("".join(map(str, res[:-1])))
@staticmethod
def d():
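            # Dijkstra from town 0 on the graph and on its reverse; stay at the town maximizing (t - round-trip time) * points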
n, m, t = map(int, sys.stdin.readline().split())
point = np.array(sys.stdin.readline().split(), dtype=int)
a, b, c = (
np.array(sys.stdin.read().split(), dtype=np.int64)
.reshape(m, 3)
.T
)
a -= 1
b -= 1
d_1 = shortest_path(
csr_matrix((c, (a, b)), (n, n)),
method="D",
directed=True,
indices=0,
)
d_2 = shortest_path(
csr_matrix((c, (b, a)), (n, n)),
method="D",
directed=True,
indices=0,
)
print(int(np.amax((t - (d_1 + d_2)) * point)))
class ABC036:
@staticmethod
def a():
a, b = map(int, sys.stdin.readline().split())
print((b + a - 1) // a)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
n = int(n)
for j in range(n):
row = ""
for i in range(n - 1, -1, -1):
row += s[i][j]
print(row)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
b = [None] * n
prev = None
j = -1
for i, x in sorted(enumerate(a), key=lambda x: x[1]):
if x != prev:
j += 1
b[i] = j
prev = x
print(*b, sep="\n")
@staticmethod
def d():
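            # tree dp over colorings with no two adjacent blacks: black(u) = prod white(v), white(u) = prod (black(v) + white(v))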
n, *ab = map(int, sys.stdin.read().split())
edges = [[] for _ in range(n)]
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
edges[a].append(b)
edges[b].append(a)
parent = [None] * n
def count(u):
black, white = 1, 1
for v in edges[u]:
if v == parent[u]:
continue
parent[v] = u
b, w = count(v)
black *= w
black %= MOD
white *= (b + w) % MOD
white %= MOD
return black, white
print(sum(count(0)) % MOD)
class ABC037:
@staticmethod
def a():
a, b, c = map(int, sys.stdin.readline().split())
print(c // min(a, b))
@staticmethod
def b():
n, q, *lrt = map(int, sys.stdin.read().split())
a = np.zeros(n, dtype=int)
for l, r, t in zip(*[iter(lrt)] * 3):
a[l - 1 : r] = t
print(*a, sep="\n")
@staticmethod
def c():
n, k, *a = map(int, sys.stdin.read().split())
a = np.array([0] + a)
np.cumsum(a, out=a)
s = (a[k:] - a[:-k]).sum()
print(s)
@staticmethod
def d():
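            # memoized dfs: paths(i, j) = 1 + sum of paths over 4-neighbours with strictly smaller values; answer is the sum over all cells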
h, w = map(int, sys.stdin.readline().split())
a = [
[int(x) for x in sys.stdin.readline().split()]
for _ in range(h)
]
dyx = [(-1, 0), (0, -1), (1, 0), (0, 1)]
path = [[None] * w for _ in range(h)]
def paths(i, j):
if path[i][j]:
return path[i][j]
val = a[i][j]
cnt = 1
for dy, dx in dyx:
y = i + dy
x = j + dx
if 0 <= y < h and 0 <= x < w and a[y][x] < val:
cnt += paths(y, x)
cnt %= MOD
path[i][j] = cnt
return cnt
tot = 0
for i in range(h):
for j in range(w):
tot += paths(i, j)
tot %= MOD
print(tot)
class ABC038:
@staticmethod
def a():
s = sys.stdin.readline().rstrip()
print("YES" if s[-1] == "T" else "NO")
@staticmethod
def b():
a, b, c, d = map(int, sys.stdin.read().split())
print("YES" if a == c or b == c or a == d or b == d else "NO")
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a += [-1]
cnt = n
tmp = 1
for i in range(n):
if a[i + 1] > a[i]:
tmp += 1
else:
cnt += tmp * (tmp - 1) // 2
tmp = 1
print(cnt)
@staticmethod
def d():
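            # nested boxes: sort by width ascending (height descending on ties); the answer is the longest strictly increasing subsequence of heights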
n, *wh = map(int, sys.stdin.read().split())
wh = sorted(zip(*[iter(wh)] * 2), key=lambda x: (-x[0], x[1]))
w = [x[1] for x in wh][::-1]
res = [inf] * n
for x in w:
res[bi_l(res, x)] = x
print(bi_l(res, inf))
class ABC039:
@staticmethod
def a():
a, b, c = map(int, sys.stdin.readline().split())
print((a * b + b * c + c * a) * 2)
@staticmethod
def b():
x = int(sys.stdin.readline().rstrip())
for n in range(1, int(x**0.5) + 1):
if pow(n, 4) == x:
print(n)
return
@staticmethod
def c():
board = "WBWBWWBWBWBW" * 3
convert = "Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si".split(", ")
s = sys.stdin.readline().rstrip()
print(convert[board.index(s)])
@staticmethod
def d():
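            # reverse the dilation: a cell may be black only if its whole 3x3 neighbourhood is '#'; valid iff those candidates' neighbourhoods cover every '#'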
h, w = map(int, sys.stdin.readline().split())
s = sys.stdin.read().split()
dyx = list(itertools.product((-1, 0, 1), repeat=2))
black_certain = set()
black_before = set()
for i in range(h):
for j in range(w):
black_cand = set()
for dy, dx in dyx:
y = i + dy
x = j + dx
if y < 0 or y >= h or x < 0 or x >= w:
continue
if s[y][x] == ".":
break
black_cand.add((y, x))
else:
black_before.add((i, j))
black_certain |= black_cand
for i in range(h):
for j in range(w):
if s[i][j] == "#" and not (i, j) in black_certain:
print("impossible")
return
print("possible")
for i in range(h):
row = ""
for j in range(w):
row += "#" if (i, j) in black_before else "."
print("".join(row))
class ABC040:
@staticmethod
def a():
n, x = map(int, sys.stdin.readline().split())
print(min(x - 1, n - x))
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
res = inf
for i in range(1, int(n**0.5) + 1):
res = min(res, n // i - i + n % i)
print(res)
@staticmethod
def c():
n, *h = map(int, sys.stdin.read().split())
h = [h[0]] + h
cost = [None] * (n + 1)
cost[0] = cost[1] = 0
for i in range(2, n + 1):
cost[i] = min(
cost[i - 2] + abs(h[i] - h[i - 2]),
cost[i - 1] + abs(h[i] - h[i - 1]),
)
print(cost[n])
@staticmethod
def d():
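            # offline: process roads and queries in decreasing order of year with a union-find; each answer is the size of v's component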
n, m = map(int, sys.stdin.readline().split())
uf = GeometryTopology.UnionFind(n=n)
queue = []
for _ in range(m):
a, b, y = map(int, sys.stdin.readline().split())
heappush(queue, (-(2 * y), a - 1, b - 1))
q = int(sys.stdin.readline().rstrip())
for i in range(q):
v, y = map(int, sys.stdin.readline().split())
heappush(queue, (-(2 * y + 1), v - 1, i))
res = [None] * q
while queue:
y, i, j = heappop(queue)
if y & 1:
res[j] = uf.size[uf.find_root(i)]
else:
uf.unite(i, j)
print(*res, sep="\n")
class ABC041:
@staticmethod
def a():
s, i = sys.stdin.read().split()
i = int(i)
print(s[i - 1])
@staticmethod
def b():
a, b, c = map(int, sys.stdin.readline().split())
ans = a * b % MOD * c % MOD
print(ans)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
for i, h in sorted(enumerate(a), key=lambda x: -x[1]):
print(i + 1)
@staticmethod
def d():
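            # bitmask dp counting topological orderings: repeatedly remove a remaining vertex with no outgoing constraints, memoized on the remaining set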
n, m, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*[iter(xy)] * 2)
edges = [0] * n
for x, y in xy:
x -= 1
y -= 1
edges[x] |= 1 << y
comb = [None] * (1 << n)
comb[0] = 1
def count(edges, bit):
if comb[bit] is not None:
return comb[bit]
comb[bit] = 0
for i in range(n):
if (bit >> i) & 1 and not edges[i]:
nxt_bit = bit & ~(1 << i)
nxt_edges = edges.copy()
for j in range(n):
nxt_edges[j] &= ~(1 << i)
cnt = count(nxt_edges, nxt_bit)
comb[bit] += cnt
return comb[bit]
print(count(edges, (1 << n) - 1))
class ABC042:
@staticmethod
def a():
a = [int(x) for x in sys.stdin.readline().split()]
c = Counter(a)
print("YES" if c[5] == 2 and c[7] == 1 else "NO")
@staticmethod
def b():
n, l, *s = sys.stdin.read().split()
print("".join(sorted(s)))
@staticmethod
def c():
n, k, *d = sys.stdin.read().split()
l = len(n)
ok = sorted(set(string.digits) - set(d))
cand = [
int("".join(p)) for p in itertools.product(ok, repeat=l)
] + [int(min(x for x in ok if x > "0") + min(ok) * l)]
print(cand[bi_l(cand, int(n))])
@staticmethod
def d():
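            # complementary counting: from all C(h+w-2, h-1) paths subtract those that step from column b-1 to b inside the bottom a rows (through the forbidden block)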
h, w, a, b = map(int, sys.stdin.read().split())
combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)
tot = combinatorics.mod_choose(h + w - 2, h - 1)
i = np.arange(h - a, h)
ng = np.sum(
combinatorics.mod_choose(i + b - 1, i)
* combinatorics.mod_choose(h - i + w - b - 2, h - 1 - i)
% MOD
)
tot -= ng
tot %= MOD
print(tot)
class ABC043:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print((1 + n) * n // 2)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
t = ""
for c in s:
if c == "B":
t = t[:-1]
else:
t += c
print(t)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
x = np.around(a.sum() / n).astype(int)
print(np.sum((a - x) ** 2))
@staticmethod
def d():
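            # an unbalanced substring exists iff two equal characters sit at distance 1 or 2; only those pairs need checking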
s = sys.stdin.readline().rstrip()
n = len(s)
for i in range(n - 1):
if s[i] == s[i + 1]:
print(i + 1, i + 2)
return
for i in range(n - 2):
if s[i] == s[i + 2]:
print(i + 1, i + 3)
return
print(-1, -1)
class ABC170:
@staticmethod
def a():
x = [int(x) for x in sys.stdin.readline().split()]
for i in range(5):
if x[i] != i + 1:
print(i + 1)
break
@staticmethod
def b():
x, y = map(int, sys.stdin.readline().split())
print("Yes" if 2 * x <= y <= 4 * x and y % 2 == 0 else "No")
@staticmethod
def c():
x, n, *p = map(int, sys.stdin.read().split())
a = list(set(range(102)) - set(p))
a = [(abs(y - x), y) for y in a]
print(sorted(a)[0][1])
@staticmethod
def d():
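            # process values in increasing order: x counts iff it is unique and no earlier value divides it; strike out multiples sieve-style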
n, *a = map(int, sys.stdin.read().split())
cand = set(a)
cnt = 0
for x, c in sorted(Counter(a).items()):
cnt += c == 1 and x in cand
cand -= set(range(x * 2, 10**6 + 1, x))
print(cnt)
@staticmethod
def e():
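            # lazy heaps: a max-heap per kindergarten plus a global min-heap of the current per-kindergarten maxima; stale entries are skipped on pop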
n, q = map(int, sys.stdin.readline().split())
queue = []
m = 2 * 10**5
infants = [[] for _ in range(m)]
highest_rate = [None] * m
where = [None] * n
rate = [None] * n
def entry(i, k):
where[i] = k
while infants[k]:
r, j = heappop(infants[k])
if where[j] != k or j == i:
continue
if rate[i] >= -r:
highest_rate[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(infants[k], (r, j))
break
else:
highest_rate[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(infants[k], (-rate[i], i))
def transfer(i, k):
now = where[i]
while infants[now]:
r, j = heappop(infants[now])
if where[j] != now or j == i:
continue
if highest_rate[now] != -r:
highest_rate[now] = -r
heappush(queue, (-r, now, j))
heappush(infants[now], (r, j))
break
else:
highest_rate[now] = None
entry(i, k)
def inquire():
while True:
r, k, i = heappop(queue)
if where[i] != k or r != highest_rate[k]:
continue
heappush(queue, (r, k, i))
return r
for i in range(n):
a, b = map(int, sys.stdin.readline().split())
rate[i] = a
entry(i, b - 1)
for _ in range(q):
c, d = map(int, sys.stdin.readline().split())
transfer(c - 1, d - 1)
print(inquire())
class ABC171:
@staticmethod
def a():
c = sys.stdin.readline().rstrip()
print("A" if c < "a" else "a")
@staticmethod
def b():
n, k, *p = map(int, sys.stdin.read().split())
print(sum(sorted(p)[:k]))
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip())
n -= 1
l = 1
while True:
if n < pow(26, l):
break
n -= pow(26, l)
l += 1
res = "".join(
[chr(ord("a") + d) for d in NumberTheory.base_convert(n, 26)][
::-1
]
)
res = "a" * (l - len(res)) + res
print(res)
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
s = sum(a)
cnt = Counter(a)
q = int(sys.stdin.readline().rstrip())
for _ in range(q):
b, c = map(int, sys.stdin.readline().split())
s += (c - b) * cnt[b]
print(s)
cnt[c] += cnt[b]
cnt[b] = 0
@staticmethod
def e():
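            # n is even, so the xor of all a_i equals the xor of every scarf number; cat i's own number is that total xor a_i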
n, *a = map(int, sys.stdin.read().split())
s = 0
for x in a:
s ^= x
b = map(lambda x: x ^ s, a)
print(*b, sep=" ")
class ABC172:
@staticmethod
def a():
a = int(sys.stdin.readline().rstrip())
print(a * (1 + a + a**2))
@staticmethod
def b():
s, t = sys.stdin.read().split()
print(sum(s[i] != t[i] for i in range(len(s))))
@staticmethod
def c():
n, m, k = map(int, sys.stdin.readline().split())
a = [0] + [int(x) for x in sys.stdin.readline().split()]
b = [int(x) for x in sys.stdin.readline().split()]
(*sa,) = itertools.accumulate(a)
(*sb,) = itertools.accumulate(b)
res = 0
for i in range(n + 1):
r = k - sa[i]
if r < 0:
break
res = max(res, i + bi_r(sb, r))
print(res)
@staticmethod
def d():
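            # f[i] = number of divisors of i, built by incrementing every multiple of each i; answer is sum of i * f[i]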
n = int(sys.stdin.readline().rstrip())
f = np.zeros(n + 1, dtype=np.int64)
for i in range(1, n + 1):
f[i::i] += 1
print((np.arange(1, n + 1) * f[1:]).sum())
class ABC173:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
charge = (n + 999) // 1000 * 1000 - n
print(charge)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
c = Counter(s)
for v in "AC, WA, TLE, RE".split(", "):
print(f"{v} x {c[v]}")
@staticmethod
def c():
h, w, k = map(int, sys.stdin.readline().split())
c = [sys.stdin.readline().rstrip() for _ in range(h)]
tot = 0
for i in range(1 << h):
for j in range(1 << w):
cnt = 0
for y in range(h):
for x in range(w):
if i >> y & 1 or j >> x & 1:
continue
cnt += c[y][x] == "#"
tot += cnt == k
print(tot)
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
a.sort(reverse=True)
res = (
a[0]
+ sum(a[1 : 1 + (n - 2) // 2]) * 2
+ a[1 + (n - 2) // 2] * (n & 1)
)
print(res)
@staticmethod
def e():
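            # maximize the product of k elements: if a non-negative product is reachable, greedily take the largest pair products; otherwise the result is negative, so take the k smallest absolute values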
MOD = 10**9 + 7
n, k, *a = map(int, sys.stdin.read().split())
minus = [x for x in a if x < 0]
plus = [x for x in a if x > 0]
if len(plus) + len(minus) // 2 * 2 >= k: # plus
(*minus,) = map(abs, minus)
minus.sort(reverse=True)
plus.sort(reverse=True)
cand = []
if len(minus) & 1:
minus = minus[:-1]
for i in range(0, len(minus) - 1, 2):
cand.append(minus[i] * minus[i + 1] % MOD)
if k & 1:
res = plus[0]
plus = plus[1:]
else:
res = 1
if len(plus) & 1:
plus = plus[:-1]
for i in range(0, len(plus) - 1, 2):
cand.append(plus[i] * plus[i + 1] % MOD)
cand.sort(reverse=True)
for x in cand[: k // 2]:
res *= x
res %= MOD
print(res)
elif 0 in a:
print(0)
else:
cand = sorted(map(abs, a))
res = 1
for i in range(k):
res *= cand[i]
res %= MOD
res = MOD - res
print(res)
pass
class ABC174:
@staticmethod
def a():
print("Yes" if int(sys.stdin.readline().rstrip()) >= 30 else "No")
class ACL001:
@staticmethod
def a():
n, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*[iter(xy)] * 2)
print(xy)
pass
class MSolutions2020:
@staticmethod
def a():
x = int(sys.stdin.readline().rstrip())
x -= 400
print(8 - x // 200)
@staticmethod
def b():
r, g, b, k = map(int, sys.stdin.read().split())
while k and g <= r:
g *= 2
k -= 1
while k and b <= g:
b *= 2
k -= 1
print("Yes" if r < g < b else "No")
@staticmethod
def c():
n, k, *a = map(int, sys.stdin.read().split())
for i in range(k, n):
print("Yes" if a[i] > a[i - k] else "No")
@staticmethod
def d():
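            # greedy: buy as many shares as possible before every price rise and sell everything before every drop (the -1 sentinel forces a final sale)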
n, *a = map(int, sys.stdin.read().split())
a += [-1]
m = 1000
s = 0
for i in range(n):
if a[i + 1] == a[i]:
continue
elif a[i + 1] > a[i]:
cnt = m // a[i]
m -= a[i] * cnt
s += cnt
else:
m += a[i] * s
s = 0
print(m)
class Codeforces:
pass
class ProjectEuler:
@staticmethod
def p1():
def f(n, x):
return (x + n // x * x) * (n // x) // 2
n = 1000
ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)
print(ans)
@staticmethod
def p2():
fib = [1, 2]
while fib[-1] < 4 * 10**6:
fib.append(fib[-1] + fib[-2])
print(sum(fib[1:-1:3]))
@staticmethod
def p3():
number_theory = NumberTheory()
res = number_theory.prime_factorize(600851475143)
print(max(res.keys()))
@staticmethod
def p4():
def is_palindrome(n):
n = str(n)
return n == n[::-1]
cand = []
for a in range(100, 1000):
for b in range(a, 1000):
n = a * b
if is_palindrome(n):
cand.append(n)
print(max(cand))
@staticmethod
def p5():
number_theory = NumberTheory()
res = defaultdict(int)
for i in range(1, 21):
for p, c in number_theory.prime_factorize(i).items():
res[p] = max(res[p], c)
ans = 1
for p, c in res.items():
ans *= pow(p, c)
print(ans)
@staticmethod
def p6():
a = np.arange(101)
b = np.cumsum(a**2)
a = a.cumsum()
print(a[100] ** 2 - b[100])
@staticmethod
def p7():
number_theory = NumberTheory()
print(sorted(number_theory.prime_numbers)[10000])
@staticmethod
def p8():
n = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
n = [int(d) for d in list(n)]
res = 0
for i in range(988):
x = 1
for j in range(13):
x *= n[i + j]
res = max(res, x)
print(res)
@staticmethod
def p9():
for a in range(1, 997):
for b in range(a, 998 - a):
c = 1000 - a - b
if a**2 + b**2 == c**2:
print(a * b * c)
return
@staticmethod
def p10():
number_theory = NumberTheory(2 * 10**6 - 1)
print(sum(number_theory.prime_numbers))
@staticmethod
def p11():
grid = "08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"
# grid = np.array(grid.split(), dtype=np.int64).reshape(20, -1)
# cand = []
# for i in range(20):
# bl1 = i+3 < 20
# for j in range(20):
# bl2 = j+3 < 20
# if bl1:
# np.prod
# tmp = 1
# for d in range(4):
# tmp *= grid[i+d, j]
print(grid)
pass
class Yukicoder:
pass
if __name__ == "__main__":
AtCoder.ABC009.d()
import itertools
import math
import string
import sys
from bisect import bisect_left as bi_l
from bisect import bisect_right as bi_r
from collections import Counter, defaultdict, deque
from heapq import heappop, heappush
from operator import or_, xor
inf = float("inf")
from functools import lru_cache, reduce
sys.setrecursionlimit(10**6)
MOD = 10**9 + 7
global using_numpy
using_numpy = False
import networkx as nx
import numpy as np
from numba import jit
from scipy import optimize
from scipy.ndimage import distance_transform_cdt
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import (
csgraph_to_dense,
maximum_flow,
minimum_spanning_tree,
shortest_path,
)
from scipy.spatial import ConvexHull
from scipy.special import comb
class Algebra:
class Mint(int):
def __init__(self, n, mod=MOD):
self.value = n
self.mod = mod
def __str__(self):
return f"{self.value}"
def __add__(self, x):
return self.__class__((self.value + x.value) % self.mod)
def __sub__(self, x):
return self.__class__((self.value - x.value) % self.mod)
def __mul__(self, x):
return self.__class__((self.value * x.value) % self.mod)
def __pow__(self, x):
return self.__class__(pow(self.value, x.value, self.mod))
def __lt__(self, x):
return self.value < x.value
def __le__(self, x):
return self.value <= x.value
def __eq__(self, x):
return self.value == x.value
def __ne__(self, x):
return self.value != x.value
def __gt__(self, x):
return self.value > x.value
def __ge__(self, x):
return self.value >= x.value
class SemiGroup:
pass
class Monoid:
pass
class Group:
pass
class SemiRing:
pass
class Ring:
pass
@staticmethod
def identity(n):
if using_numpy:
return np.identity(n, dtype=np.int64)
else:
a = [[0] * n for _ in range(n)]
for i in range(n):
a[i][i] = 1
return a
@staticmethod
def dot(a, b):
if using_numpy:
return np.dot(a, b)
else:
assert len(a[0]) == len(b)
c = [[0] * len(b[0]) for _ in range(len(a))]
for i in range(len(a)):
for j in range(len(b[0])):
for k in range(len(b)):
c[i][j] += a[i][k] * b[k][j]
return c
@classmethod
def matrix_pow(cls, a, n, mod=10**9 + 7):
m = len(a)
b = cls.identity(m)
while n:
if n & 1:
b = cls.dot(b, a)
n >>= 1
a = cls.dot(a, a)
if using_numpy:
a %= mod
b %= mod
else:
for i in range(m):
for j in range(m):
a[i][j] %= mod
b[i][j] %= mod
return b
@staticmethod
def bitwise_dot(a, b):
if using_numpy:
return np.bitwise_xor.reduce(
a[:, None, :] & b.T[None, :, :], axis=-1
)
else:
assert len(a[0]) == len(b)
c = [[0] * len(b[0]) for _ in range(len(a))]
for i in range(len(a)):
for j in range(len(b[0])):
for k in range(len(b)):
c[i][j] ^= a[i][k] & b[k][j]
return c
@classmethod
def bitwise_mat_pow(cls, a, n):
if n == 0:
return np.eye(len(a), dtype=np.uint32) * ((1 << 32) - 1)
res = cls.bitwise_mat_pow(a, n // 2)
res = cls.bitwise_dot(res, res)
return cls.bitwise_dot(res, a) if n & 1 else res
class NumberTheory:
def __init__(self, n=2 * 10**6):
self.n = n
self.is_prime_number, self.prime_numbers = self.sieve_of_eratosthenes(
n
)
def sieve_of_eratosthenes(self, n):
if using_numpy:
sieve = np.ones(n + 1, dtype=np.int32)
sieve[:2] = 0
for i in range(2, int(n**0.5) + 1):
if sieve[i]:
sieve[i * 2 :: i] = 0
prime_numbers = np.flatnonzero(sieve)
else:
sieve = [1] * (n + 1)
sieve[0] = sieve[1] = 0
for i in range(2, int(n**0.5) + 1):
if not sieve[i]:
continue
for j in range(i * 2, n + 1, i):
sieve[j] = 0
prime_numbers = [i for i in range(2, n + 1) if sieve[i]]
return sieve, prime_numbers
def prime_factorize(self, n):
res = dict()
if n < 2:
return res
border = int(n**0.5)
for p in self.prime_numbers:
if p > border:
break
while n % p == 0:
res[p] = res.get(p, 0) + 1
n //= p
if n == 1:
return res
res[n] = 1
return res
def prime_factorize_factorial(self, n):
res = dict()
for i in range(2, n + 1):
for p, c in self.prime_factorize(i).items():
res[p] = res.get(p, 0) + c
return res
@classmethod
@lru_cache(maxsize=None)
def gcd(cls, a, b):
return cls.gcd(b, a % b) if b else abs(a)
@classmethod
def lcm(cls, a, b):
return abs(a // cls.gcd(a, b) * b)
@staticmethod
def find_divisors(n):
divisors = []
for i in range(1, int(n**0.5) + 1):
if n % i:
continue
divisors.append(i)
j = n // i
if j != i:
divisors.append(j)
return sorted(divisors)
@staticmethod
def base_convert(n, b):
if not n:
return [0]
res = []
while n:
n, r = divmod(n, b)
if r < 0:
n += 1
r -= b
res.append(r)
return res
mint = Algebra.Mint
class Combinatorics:
def __init__(self, N=10**9, n=10**6, mod=10**9 + 7):
self.mod = mod
self.make_mod_tables(N, n)
@classmethod
@lru_cache(maxsize=None)
def choose(cls, n, r, mod=None):
if r > n or r < 0:
return 0
if r == 0:
return 1
res = cls.choose(n - 1, r, mod) + cls.choose(n - 1, r - 1, mod)
if mod:
res %= mod
return res
def cumprod(self, a):
p = self.mod
l = len(a)
sql = int(np.sqrt(l) + 1)
a = np.resize(a, sql**2).reshape(sql, sql)
for i in range(sql - 1):
a[:, i + 1] *= a[:, i]
a[:, i + 1] %= p
for i in range(sql - 1):
a[i + 1] *= a[i, -1]
a[i + 1] %= p
return np.ravel(a)[:l]
def make_mod_tables(self, N, n):
p = self.mod
if using_numpy:
fac = np.arange(n + 1)
fac[0] = 1
fac = self.cumprod(fac)
ifac = np.arange(n + 1, 0, -1)
ifac[0] = pow(int(fac[-1]), p - 2, p)
ifac = self.cumprod(ifac)[n::-1]
n_choose = np.arange(N + 1, N - n, -1)
n_choose[0] = 1
n_choose[1:] = self.cumprod(n_choose[1:]) * ifac[1 : n + 1] % p
else:
fac = [None] * (n + 1)
fac[0] = 1
for i in range(n):
fac[i + 1] = fac[i] * (i + 1) % p
ifac = [None] * (n + 1)
ifac[n] = pow(fac[n], p - 2, p)
for i in range(n, 0, -1):
ifac[i - 1] = ifac[i] * i % p
n_choose = [None] * (n + 1)
n_choose[0] = 1
for i in range(n):
n_choose[i + 1] = n_choose[i] * (N - i) % p
for i in range(n + 1):
n_choose[i] = n_choose[i] * ifac[i] % p
self.fac, self.ifac, self.mod_n_choose = fac, ifac, n_choose
def mod_choose(self, n, r):
p = self.mod
return self.fac[n] * self.ifac[r] % p * self.ifac[n - r] % p
@classmethod
def permutations(cls, a, r=None, i=0):
a = list(a)
n = len(a)
if r is None:
r = n
res = []
if r > n or i > r:
return res
if i == r:
return [tuple(a[:r])]
for j in range(i, n):
a[i], a[j] = a[j], a[i]
res += cls.permutations(a, r, i + 1)
return res
@staticmethod
def combinations(a, r):
a = tuple(a)
n = len(a)
if r > n:
return
indices = list(range(r))
yield a[:r]
while True:
for i in range(r - 1, -1, -1):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield tuple(a[i] for i in indices)
class String:
@staticmethod
def z_algorithm(s):
n = len(s)
a = [0] * n
a[0] = n
l = r = -1
for i in range(1, n):
if r >= i:
a[i] = min(a[i - l], r - i)
while i + a[i] < n and s[i + a[i]] == s[a[i]]:
a[i] += 1
if i + a[i] >= r:
l, r = i, i + a[i]
return a
class GeometryTopology:
class Graph:
def __init__(self, nodes={}, edges={}):
self.nodes = nodes
self.edges = edges
def add_node(self, v, **info):
if not v in self.edges:
self.edges[v] = {}
if v in self.nodes:
return
self.nodes[v] = info
def add_edge(self, u, v, **info):
self.add_node(u)
self.add_node(v)
self.edges[u][v] = info
def get_size(self):
return len(self.nodes)
def dinic(self, src, sink):
def bfs():
lv = {src: 0}
q = deque([src])
while q:
u = q.popleft()
for v, e in self.edges[u].items():
if e["capacity"] == 0 or v in lv:
continue
lv[v] = lv[u] + 1
q.append(v)
return lv
def flow_to_sink(u, flow_in):
if u == sink:
return flow_in
flow = 0
for v, e in self.edges[u].items():
cap = e["capacity"]
if cap == 0 or lv[v] <= lv[u]:
continue
f = flow_to_sink(v, min(flow_in, cap))
if not f:
continue
self.edges[u][v]["capacity"] -= f
if v in self.edges and u in self.edges[v]:
self.edges[v][u]["capacity"] += f
else:
self.add_edge(v, u, capacity=f)
flow_in -= f
flow += f
return flow
flow = 0
while True:
lv = bfs()
if not sink in lv:
return flow
flow += flow_to_sink(src, inf)
def ford_fulkerson(self):
pass
def push_relabel(self):
pass
def floyd_warshall(self):
d = {u: {v: inf for v in self.nodes} for u in self.nodes}
for v in self.nodes:
d[v][v] = 0
for u in self.edges:
for v in self.edges[u]:
d[u][v] = self.edges[u][v]["weight"]
for w in self.nodes:
for u in self.nodes:
for v in self.nodes:
d[u][v] = min(d[u][v], d[u][w] + d[w][v])
return d
def dijkstra(self, src, paths_cnt=False, mod=None):
dist = {v: inf for v in self.nodes}
dist[src] = 0
visited = set()
paths = {v: 0 for v in self.nodes}
paths[src] = 1
q = [(0, src)]
while q:
d, u = heappop(q)
if u in visited:
continue
visited.add(u)
for v, e in self.edges[u].items():
dv = d + e["weight"]
if dv > dist[v]:
continue
elif dv == dist[v]:
paths[v] += paths[u]
if mod:
paths[v] %= mod
continue
paths[v] = paths[u]
dist[v] = dv
heappush(q, (dv, v))
if paths_cnt:
return dist, paths
else:
return dist
def astar(self, src, tgt, heuristic_func):
cost = {v: inf for v in self.nodes}
q = [(heuristic_func(src, tgt), 0, src)]
while q:
s, c, u = heappop(q)
if u == tgt:
return c
if cost[u] != inf:
continue
cost[u] = c
for v, e in self.edges[u].items():
if cost[v] != inf:
continue
h = heuristic_func(v, tgt)
nc = c + e["weight"]
heappush(q, (h + nc, nc, v))
return inf
def init_tree(self, root=0):
self.depth = {root: 0}
self.dist = {root: 0}
self.ancestors = [{root: root}]
stack = [root]
while stack:
u = stack.pop()
for v, e in self.edges[u].items():
if v == self.ancestors[0][u]:
continue
self.dist[v] = self.dist[u] + e["weight"]
self.depth[v] = self.depth[u] + 1
self.ancestors[0][v] = u
stack.append(v)
for _ in range(max(self.depth).bit_length()):
ancestor = self.ancestors[-1]
nxt_ancestor = {v: ancestor[ancestor[v]] for v in self.nodes}
self.ancestors.append(nxt_ancestor)
def find_dist(self, u, v):
return (
self.dist[u]
+ self.dist[v]
- 2 * self.dist[self.find_lca(u, v)]
)
def find_lca(self, u, v):
du, dv = self.depth[u], self.depth[v]
if du > dv:
u, v = v, u
du, dv = dv, du
d = dv - du
for i in range((d).bit_length()):
if d >> i & 1:
v = self.ancestors[i][v]
if v == u:
return v
for i in range(
du.bit_length() - 1, -1, -1
):
nu, nv = self.ancestors[i][u], self.ancestors[i][v]
if nu == nv:
continue
u, v = nu, nv
return self.ancestors[0][u]
@staticmethod
def triangle_area(p0, p1, p2, signed=False):
x1, y1, x2, y2 = (
p1[0] - p0[0],
p1[1] - p0[1],
p2[0] - p0[0],
p2[1] - p0[1],
)
return (
(x1 * y2 - x2 * y1) / 2 if signed else abs(x1 * y2 - x2 * y1) / 2
)
@classmethod
def intersect(cls, seg1, seg2):
(p1, p2), (p3, p4) = seg1, seg2
t1 = cls.triangle_area(p1, p2, p3, signed=True)
t2 = cls.triangle_area(p1, p2, p4, signed=True)
t3 = cls.triangle_area(p3, p4, p1, signed=True)
t4 = cls.triangle_area(p3, p4, p2, signed=True)
return (t1 * t2 < 0) & (t3 * t4 < 0)
class UnionFind:
def __init__(self, n=10**6):
self.root = list(range(n))
self.height = [0] * n
self.size = [1] * n
def find_root(self, u):
if self.root[u] == u:
return u
self.root[u] = self.find_root(self.root[u])
return self.root[u]
def unite(self, u, v):
ru = self.find_root(u)
rv = self.find_root(v)
if ru == rv:
return
hu = self.height[ru]
hv = self.height[rv]
if hu >= hv:
self.root[rv] = ru
self.size[ru] += self.size[rv]
self.height[ru] = max(hu, hv + 1)
else:
self.root[ru] = rv
self.size[rv] += self.size[ru]
def cumxor(a):
return reduce(xor, a, 0)
def cumor(a):
return reduce(or_, a, 0)
def bit_count(n):
cnt = 0
while n:
cnt += n & 1
n >>= 1
return cnt
class AtCoder:
class ABC001:
@staticmethod
def a():
h1, h2 = map(int, sys.stdin.read().split())
print(h1 - h2)
@staticmethod
def d():
def to_minuites(x):
q, r = divmod(x, 100)
return 60 * q + r
def to_hmform(x):
q, r = divmod(x, 60)
return 100 * q + r
n = int(sys.stdin.readline().rstrip())
term = [0] * 2001
for _ in range(n):
s, e = map(
to_minuites,
map(int, sys.stdin.readline().rstrip().split("-")),
)
s = s // 5 * 5
e = (e + 4) // 5 * 5
term[s] += 1
term[e + 1] -= 1
for i in range(2000):
term[i + 1] += term[i]
res = []
raining = False
for i in range(2001):
if term[i]:
if not raining:
s = i
raining = True
elif raining:
res.append((s, i - 1))
raining = False
for s, e in res:
print(f"{to_hmform(s):04}-{to_hmform(e):04}")
class ABC002:
@staticmethod
def a():
print(max(map(int, sys.stdin.readline().split())))
@staticmethod
def b():
vowels = set("aeiou")
print(
"".join(
[
c
for c in sys.stdin.readline().rstrip()
if c not in vowels
]
)
)
@staticmethod
def c():
print(
GeometryTopology.triangle_area(
*map(int, sys.stdin.readline().split())
)
)
@staticmethod
def d():
n, m = map(int, sys.stdin.readline().split())
edges = set(
(x - 1, y - 1)
for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)
)
print(
max(
len(s)
for i in range(1, 1 << n)
for s in [[j for j in range(n) if i >> j & 1]]
if all(
(x, y) in edges
for x, y in itertools.combinations(s, 2)
)
)
)
@staticmethod
def d_2():
n, m = map(int, sys.stdin.readline().split())
relations = [1 << i for i in range(n)]
for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):
x -= 1
y -= 1
relations[x] |= 1 << y
relations[y] |= 1 << x
res = 0
for i in range(1 << n):
cnt = 0
s = 0
t = (1 << n) - 1
for j in range(n):
if i >> j & 1:
s |= 1 << j
t &= relations[j]
cnt += 1
if t & s == s:
res = max(res, cnt)
print(res)
class ABC003:
@staticmethod
def a():
print((int(sys.stdin.readline().rstrip()) + 1) * 5000)
@staticmethod
def b():
atcoder = set("atcoder")
s, t = sys.stdin.read().split()
print(
all(
s[i] == t[i]
or s[i] == "@"
and t[i] in atcoder
or t[i] == "@"
and s[i] in atcoder
for i in range(len(s))
)
and "You can win"
or "You will lose"
)
@staticmethod
def c():
n, k, *r = map(int, sys.stdin.read().split())
print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))
class ABC004:
@staticmethod
def a():
print(int(sys.stdin.readline().rstrip()) * 2)
@staticmethod
def b():
for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:
print(l[::-1])
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip()) % 30
res = list(range(1, 7))
for i in range(n):
i %= 5
res[i], res[i + 1] = res[i + 1], res[i]
print(*res, sep="")
class ABC005:
@staticmethod
def a():
x, y = map(int, sys.stdin.readline().split())
print(y // x)
@staticmethod
def b():
n, *t = map(int, sys.stdin.read().split())
print(min(t))
@staticmethod
def c():
t = int(sys.stdin.readline().rstrip())
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
m = int(sys.stdin.readline().rstrip())
b = [int(x) for x in sys.stdin.readline().split()]
i = 0
for p in b:
if i == n:
print("no")
return
while p - a[i] > t:
i += 1
if i == n:
print("no")
return
if a[i] > p:
print("no")
return
i += 1
print("yes")
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
d = np.array(
[sys.stdin.readline().split() for _ in range(n)], np.int64
)
s = d.cumsum(axis=0).cumsum(axis=1)
s = np.pad(s, 1)
max_del = np.zeros((n + 1, n + 1), dtype=np.int64)
for y in range(1, n + 1):
for x in range(1, n + 1):
max_del[y, x] = np.amax(
s[y : n + 1, x : n + 1]
- s[0 : n - y + 1, x : n + 1]
- s[y : n + 1, 0 : n - x + 1]
+ s[0 : n - y + 1, 0 : n - x + 1]
)
res = np.arange(n**2 + 1)[:, None]
i = np.arange(1, n + 1)
res = max_del[i, np.minimum(res // i, n)].max(axis=1)
q = int(sys.stdin.readline().rstrip())
p = np.array(sys.stdin.read().split(), dtype=np.int64)
print(*res[p], sep="\n")
class ABC006:
@staticmethod
def a():
n = sys.stdin.readline().rstrip()
if "3" in n:
print("YES")
elif int(n) % 3 == 0:
print("YES")
else:
print("NO")
@staticmethod
def b():
mod = 10007
a = np.eye(N=3, k=-1, dtype=np.int64)
a[0] = 1
n = int(sys.stdin.readline().rstrip())
a = Algebra.matrix_pow(a, n - 1, mod)
print(a[2][0])
@staticmethod
def c():
n, m = map(int, sys.stdin.readline().split())
cnt = [0, 0, 0]
if m == 1:
cnt = [-1, -1, -1]
else:
if m & 1:
m -= 3
cnt[1] += 1
n -= 1
cnt[2] = m // 2 - n
cnt[0] = n - cnt[2]
if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:
print(-1, -1, -1)
else:
print(*cnt, sep=" ")
@staticmethod
def d():
n, *c = map(int, sys.stdin.read().split())
lis = [inf] * n
for x in c:
lis[bi_l(lis, x)] = x
print(n - bi_l(lis, inf))
class ABC007:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print(n - 1)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
if s == "a":
print(-1)
else:
print("a")
@staticmethod
def c():
r, c = map(int, sys.stdin.readline().split())
sy, sx = map(int, sys.stdin.readline().split())
gy, gx = map(int, sys.stdin.readline().split())
sy -= 1
sx -= 1
gy -= 1
gx -= 1
maze = [sys.stdin.readline().rstrip() for _ in range(r)]
queue = deque([(sy, sx)])
dist = np.full((r, c), np.inf)
dist[sy, sx] = 0
while queue:
y, x = queue.popleft()
for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
i += y
j += x
if maze[i][j] == "#" or dist[i, j] != np.inf:
continue
dist[i, j] = dist[y, x] + 1
queue.append((i, j))
print(int(dist[gy, gx]))
@staticmethod
def d():
ng = set([4, 9])
def count(d):
return d if d <= 4 else d - 1
def f(n):
x = [int(d) for d in str(n)]
flg = True
dp = 0
for d in x:
dp = dp * 8 + flg * count(d)
if d in ng:
flg = False
return n - (dp + flg)
a, b = map(int, sys.stdin.readline().split())
print(f(b) - f(a - 1))
class ABC008:
@staticmethod
def a():
s, t = map(int, sys.stdin.readline().split())
print(t - s + 1)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
res = defaultdict(int)
for name in s:
res[name] += 1
print(sorted(res.items(), key=lambda x: x[1])[-1][0])
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
c = n - np.count_nonzero(a[:, None] % a, axis=1)
print(np.sum((c + 1) // 2 / c))
@staticmethod
def d():
w, h, n, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*([iter(xy)] * 2))
@lru_cache(maxsize=None)
def count(x1, y1, x2, y2):
res = 0
for x, y in xy:
if not (x1 <= x <= x2 and y1 <= y <= y2):
continue
cnt = (x2 - x1) + (y2 - y1) + 1
cnt += count(x1, y1, x - 1, y - 1)
cnt += count(x1, y + 1, x - 1, y2)
cnt += count(x + 1, y1, x2, y - 1)
cnt += count(x + 1, y + 1, x2, y2)
res = max(res, cnt)
return res
print(count(1, 1, w, h))
class ABC009:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print((n + 1) // 2)
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
print(sorted(set(a))[-2])
@staticmethod
def c():
n, k = map(int, sys.stdin.readline().split())
s = list(sys.stdin.readline().rstrip())
cost = [1] * n
r = k
for i in range(n - 1):
q = []
for j in range(i + 1, n):
if s[j] < s[i] and cost[i] + cost[j] <= r:
heappush(q, (s[j], cost[i] + cost[j], -j))
if not q:
continue
_, c, j = heappop(q)
j = -j
s[i], s[j] = s[j], s[i]
r -= c
cost[i] = cost[j] = 0
print("".join(s))
@staticmethod
def d():
k, m = map(int, sys.stdin.readline().split())
a = np.array([int(x) for x in sys.stdin.readline().split()])
c = np.array([int(x) for x in sys.stdin.readline().split()])
mask = (1 << 32) - 1
d = np.eye(k, k, -1, dtype=np.uint32) * mask
d[0] = c
if m <= k:
print(a[m - 1])
return
print(
Algebra.bitwise_dot(
Algebra.bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1)
)[0][0]
)
class ABC010:
@staticmethod
def a():
print(sys.stdin.readline().rstrip() + "pp")
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
tot = 0
for x in a:
c = 0
while x % 2 == 0 or x % 3 == 2:
x -= 1
c += 1
tot += c
print(tot)
@staticmethod
def c():
sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())
x, y = np.array(xy).reshape(-1, 2).T
def dist(x1, y1, x2, y2):
return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
ans = (
"YES"
if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()
else "NO"
)
print(ans)
@staticmethod
def d():
n, g, e = map(int, sys.stdin.readline().split())
p = [int(x) for x in sys.stdin.readline().split()]
x, y = [], []
for _ in range(e):
a, b = map(int, sys.stdin.readline().split())
x.append(a)
y.append(b)
x.append(b)
y.append(a)
for a in p:
x.append(a)
y.append(n)
if not x:
print(0)
return
c = [1] * len(x)
min_cut = maximum_flow(
csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n
).flow_value
print(min_cut)
@staticmethod
def d_2():
n, g, e = map(int, sys.stdin.readline().split())
graph = nx.DiGraph()
graph.add_nodes_from(range(n + 1))
for p in [int(x) for x in sys.stdin.readline().split()]:
graph.add_edge(p, n, capacity=1)
for _ in range(e):
a, b = map(int, sys.stdin.readline().split())
graph.add_edge(a, b, capacity=1)
graph.add_edge(b, a, capacity=1)
print(nx.minimum_cut_value(graph, 0, n))
@staticmethod
def d_3():
n, g, e = map(int, sys.stdin.readline().split())
graph = GeometryTopology.Graph()
for i in range(n + 1):
graph.add_node(i)
for p in [int(x) for x in sys.stdin.readline().split()]:
graph.add_edge(p, n, capacity=1)
for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):
graph.add_edge(a, b, capacity=1)
graph.add_edge(b, a, capacity=1)
print(graph.dinic(0, n))
class ABC011:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print(n % 12 + 1)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
print(s[0].upper() + s[1:].lower())
@staticmethod
def c():
n, *ng = map(int, sys.stdin.read().split())
ng = set(ng)
if n in ng:
print("NO")
else:
r = 100
while n > 0:
if r == 0:
print("NO")
return
for i in range(3, 0, -1):
if (n - i) in ng:
continue
n -= i
r -= 1
break
else:
print("NO")
return
print("YES")
@staticmethod
def d():
n, d, x, y = map(int, sys.stdin.read().split())
x, y = abs(x), abs(y)
if x % d or y % d:
print(0)
return
x, y = x // d, y // d
r = n - (x + y)
if r < 0 or r & 1:
print(0)
return
res = 0
half_p = pow(1 / 2, n)
for d in range(r // 2 + 1):
south, north = d, y + d
west = (r - 2 * d) // 2
res += (
half_p
* comb(n, south, exact=True)
* comb(n - south, north, exact=True)
* comb(n - south - north, west, exact=True)
* half_p
)
print(res)
class ABC012:
@staticmethod
def a():
a, b = map(int, sys.stdin.readline().split())
print(b, a)
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
h, n = divmod(n, 3600)
m, s = divmod(n, 60)
print(f"{h:02}:{m:02}:{s:02}")
@staticmethod
def c():
n = 2025 - int(sys.stdin.readline().rstrip())
res = []
for i in range(1, 10):
if n % i != 0 or n // i > 9:
continue
res.append(f"{i} x {n//i}")
print(*sorted(res), sep="\n")
@staticmethod
def d():
n, m, *abt = map(int, sys.stdin.read().split())
a, b, t = np.array(abt).reshape(m, 3).T
res = shortest_path(
csr_matrix((t, (a - 1, b - 1)), (n, n)),
method="FW",
directed=False,
)
print(res.max(axis=-1).min().astype(np.int64))
@staticmethod
def d_2():
n, m, *abt = map(int, sys.stdin.read().split())
graph = GeometryTopology.Graph()
for a, b, t in zip(*[iter(abt)] * 3):
a -= 1
b -= 1
graph.add_edge(a, b, weight=t)
graph.add_edge(b, a, weight=t)
dist = graph.floyd_warshall()
res = min([max(tmp.values()) for tmp in dist.values()])
print(res)
class ABC013:
@staticmethod
def a():
print(ord(sys.stdin.readline().rstrip()) - ord("A") + 1)
@staticmethod
def b():
a, b = map(int, sys.stdin.read().split())
d = abs(a - b)
print(min(d, 10 - d))
@staticmethod
def c():
n, h, a, b, c, d, e = map(int, sys.stdin.read().split())
y = np.arange(n + 1)
x = (n * e - h - (d + e) * y) // (b + e) + 1
np.maximum(x, 0, out=x)
np.minimum(x, n - y, out=x)
print(np.amin(a * x + c * y))
@staticmethod
def d():
n, m, d, *a = map(int, sys.stdin.read().split())
res = list(range(n))
def swap(i, j):
res[i], res[j] = res[j], res[i]
for i in a[::-1]:
swap(i - 1, i)
group = [None] * n
root = [None] * n
index_in_group = [None] * n
for i in range(n):
if root[i] is not None:
continue
group[i] = []
j = i
for cnt in range(1, n + 1):
index_in_group[j] = cnt - 1
group[i].append(j)
j = res[j]
root[j] = i
if j == i:
break
for i in range(n):
g = group[root[i]]
print(g[(index_in_group[i] + d) % len(g)] + 1)
class ABC014:
@staticmethod
def a():
a, b = map(int, sys.stdin.read().split())
print((a + b - 1) // b * b - a)
@staticmethod
def b():
n, x, *a = map(int, sys.stdin.read().split())
print(sum(a[i] for i in range(n) if x >> i & 1))
@staticmethod
def c():
n, *ab = map(int, sys.stdin.read().split())
a, b = np.array(ab).reshape(n, 2).T
res = np.zeros(10**6 + 2, dtype=np.int64)
np.add.at(res, a, 1)
np.subtract.at(res, b + 1, 1)
np.cumsum(res, out=res)
print(res.max())
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
g = GeometryTopology.Graph()
for _ in range(n - 1):
x, y = map(int, sys.stdin.readline().split())
x -= 1
y -= 1
g.add_edge(x, y, weight=1)
g.add_edge(y, x, weight=1)
g.init_tree()
q, *ab = map(int, sys.stdin.read().split())
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
print(g.find_dist(a, b) + 1)
class ABC015:
@staticmethod
def a():
a, b = sys.stdin.read().split()
print(a if len(a) > len(b) else b)
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
print(
np.ceil(
a[np.nonzero(a)[0]].sum() / np.count_nonzero(a)
).astype(np.int8)
)
@staticmethod
def c():
n, k, *t = map(int, sys.stdin.read().split())
t = np.array(t).reshape(n, k)
x = np.zeros((1, 1), dtype=np.int8)
for i in range(n):
x = x.reshape(-1, 1) ^ t[i]
print("Found" if np.count_nonzero(x == 0) > 0 else "Nothing")
@staticmethod
def d():
w, n, k, *ab = map(int, sys.stdin.read().split())
dp = np.zeros((k + 1, w + 1), dtype=np.int32)
for a, b in zip(*[iter(ab)] * 2):
prev = dp.copy()
np.maximum(dp[1:, a:], prev[:-1, :-a] + b, out=dp[1:, a:])
print(dp[k][w])
class ABC016:
@staticmethod
def a():
m, d = map(int, sys.stdin.readline().split())
print("YES" if m % d == 0 else "NO")
@staticmethod
def b():
a, b, c = map(int, sys.stdin.readline().split())
f1, f2 = a + b == c, a - b == c
if f1 & f2:
print("?")
elif f1 & (~f2):
print("+")
elif (~f1) & f2:
print("-")
else:
print("!")
@staticmethod
def c():
n, _, *ab = map(int, sys.stdin.read().split())
friends = [0] * n
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
friends[a] |= 1 << b
friends[b] |= 1 << a
res = [
bit_count(
cumor(friends[j] for j in range(n) if friends[i] >> j & 1)
& ~(friends[i] | 1 << i)
)
for i in range(n)
]
print(*res, sep="\n")
@staticmethod
def d():
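        # Count how many given segments the straight S-G segment crosses; every two crossings
        # add one region, so the answer is crossings // 2 + 1.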
sx, sy, gx, gy = map(int, sys.stdin.readline().split())
seg1 = ((sx, sy), (gx, gy))
n = int(sys.stdin.readline().rstrip())
p1 = (
np.array(sys.stdin.read().split(), dtype=np.int64)
.reshape(n, 2)
.T
)
p2 = np.hstack((p1[:, 1:], p1[:, :1]))
seg2 = (p1, p2)
print(
np.count_nonzero(GeometryTopology.intersect(seg1, seg2)) // 2
+ 1
)
class ABC017:
@staticmethod
def a():
s, e = (
np.array(sys.stdin.read().split(), dtype=np.int16)
.reshape(3, 2)
.T
)
print((s // 10 * e).sum())
@staticmethod
def b():
choku_tail = set("ch, o, k, u".split(", "))
def is_choku(s):
if s == "":
return True
if len(s) >= 1 and (s[-1] in choku_tail) and is_choku(s[:-1]):
return True
if len(s) >= 2 and (s[-2:] in choku_tail) and is_choku(s[:-2]):
return True
return False
print("YES" if is_choku(sys.stdin.readline().rstrip()) else "NO")
@staticmethod
def c():
n, m, *lrs = map(int, sys.stdin.read().split())
l, r, s = np.array(lrs).reshape(n, 3).T
score = np.zeros((m + 1,), dtype=np.int32)
np.add.at(score, l - 1, s)
np.subtract.at(score, r, s)
np.cumsum(score, out=score)
print(s.sum() - score[:m].min())
@staticmethod
def d():
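        # dp[i] = number of ways for the first i elements; s is the sliding-window sum of dp
        # over positions no earlier than the previous occurrence of f[i].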
n, m, *f = map(int, sys.stdin.read().split())
prev = [0] * (n + 1)
tmp = defaultdict(int)
for i in range(n):
prev[i + 1] = tmp[f[i]]
tmp[f[i]] = i + 1
dp = [0] * (n + 1)
dp[0] = 1
l, s = 0, dp[0]
for i in range(1, n + 1):
while l < prev[i]:
s = (s - dp[l]) % MOD
l += 1
dp[i] = s
s = (s + dp[i]) % MOD
print(dp[n])
class ABC018:
@staticmethod
def a():
(*a,) = map(int, sys.stdin.read().split())
a = sorted(enumerate(a), key=lambda x: -x[1])
res = [None] * 3
for i in range(3):
res[a[i][0]] = i + 1
print(*res, sep="\n")
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
n, *lr = map(int, sys.stdin.read().split())
for l, r in zip(*[iter(lr)] * 2):
l -= 1
r -= 1
s = s[:l] + s[l : r + 1][::-1] + s[r + 1 :]
print(s)
@staticmethod
def c():
r, c, k = map(int, sys.stdin.readline().split())
s = np.array([list(s) for s in sys.stdin.read().split()])
s = np.pad(s, 1, constant_values="x")
a = np.zeros_like(s, dtype=np.float64)
a[s == "o"] = np.inf
for i in range(1, r + 1):
np.minimum(a[i - 1, :] + 1, a[i, :], out=a[i, :])
for i in range(r, 0, -1):
np.minimum(a[i + 1, :] + 1, a[i, :], out=a[i, :])
for j in range(1, c + 1):
np.minimum(a[:, j - 1] + 1, a[:, j], out=a[:, j])
for j in range(c, 0, -1):
np.minimum(a[:, j + 1] + 1, a[:, j], out=a[:, j])
print(np.count_nonzero(a >= k))
@staticmethod
def c_2():
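        # Same idea as c(), but the taxicab distance to the nearest 'x' cell is computed with
        # scipy's distance_transform_cdt.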
r, c, k = map(int, sys.stdin.readline().split())
s = np.array([list(s) for s in sys.stdin.read().split()])
s = np.pad(s, 1, constant_values="x")
a = (s == "o").astype(np.int16)
a = distance_transform_cdt(a, metric="taxicab")
print(np.count_nonzero(a >= k))
@staticmethod
def d():
n, m, p, q, r, *xyz = map(int, sys.stdin.read().split())
x, y, z = np.array(xyz).reshape(r, 3).T
h = np.zeros((n, m), dtype=np.int32)
h[x - 1, y - 1] = z
g = np.array([*itertools.combinations(range(n), p)])
print(np.sort(h[g].sum(axis=1), axis=1)[:, -q:].sum(axis=1).max())
class ABC019:
@staticmethod
def a():
(*a,) = map(int, sys.stdin.readline().split())
print(sorted(a)[1])
@staticmethod
def b():
s = sys.stdin.readline().rstrip() + "$"
cnt = 0
prev = "$"
t = ""
for c in s:
if c == prev:
cnt += 1
continue
t += prev + str(cnt)
prev = c
cnt = 1
print(t[2:])
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
res = set()
for x in a:
while not x & 1:
x >>= 1
res.add(x)
print(len(res))
@staticmethod
def d():
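        # Interactive tree diameter: query all distances from vertex 1, move to the farthest
        # vertex u, then the maximum distance from u is the diameter.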
def inquire(u, v):
print(f"? {u} {v}".format(u, v), flush=True)
return int(sys.stdin.readline().rstrip())
n = int(sys.stdin.readline().rstrip())
u = sorted([(inquire(1, v), v) for v in range(2, n + 1)])[-1][1]
d = max((inquire(u, v)) for v in range(1, n + 1) if u != v)
print(f"! {d}")
class ABC020:
@staticmethod
def a():
print(
"ABC"
if int(sys.stdin.readline().rstrip()) == 1
else "chokudai"
)
@staticmethod
def b():
a, b = sys.stdin.readline().split()
print(int(a + b) * 2)
@staticmethod
def c():
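        # Binary search the largest wall cost x for which the shortest S->G time, computed by
        # Dijkstra on the grid graph, still fits within t.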
h, w, t = map(int, sys.stdin.readline().split())
s = [list(s) for s in sys.stdin.read().split()]
for i in range(h):
for j in range(w):
if s[i][j] == "S":
sy, sx = i, j
if s[i][j] == "G":
gy, gx = i, j
s[sy][sx] = s[gy][gx] = "."
source, target = (sy, sx), (gy, gx)
def heuristic_function(u, v=target):
                return abs(v[0] - u[0]) + abs(v[1] - u[1])
def min_time(x):
graph = GeometryTopology.Graph()
for i in range(h):
for j in range(w):
graph.add_node((i, j))
for i in range(h):
for j in range(w):
if i > 0:
graph.add_edge(
(i, j),
(i - 1, j),
weight=(1 if s[i - 1][j] == "." else x),
)
if i < h - 1:
graph.add_edge(
(i, j),
(i + 1, j),
weight=(1 if s[i + 1][j] == "." else x),
)
if j > 0:
graph.add_edge(
(i, j),
(i, j - 1),
weight=(1 if s[i][j - 1] == "." else x),
)
if j < w - 1:
graph.add_edge(
(i, j),
(i, j + 1),
weight=(1 if s[i][j + 1] == "." else x),
)
return graph.dijkstra(source)[target]
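            # NOTE: the NetworkX-based variants below are unreachable alternatives kept for reference.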
graph = nx.DiGraph()
for i in range(h):
for j in range(w):
if i > 0:
graph.add_edge(
(i, j),
(i - 1, j),
weight=(1 if s[i - 1][j] == "." else x),
)
if i < h - 1:
graph.add_edge(
(i, j),
(i + 1, j),
weight=(1 if s[i + 1][j] == "." else x),
)
if j > 0:
graph.add_edge(
(i, j),
(i, j - 1),
weight=(1 if s[i][j - 1] == "." else x),
)
if j < w - 1:
graph.add_edge(
(i, j),
(i, j + 1),
weight=(1 if s[i][j + 1] == "." else x),
)
return nx.dijkstra_path_length(graph, source, target)
return nx.astar_path_length(
graph, source, target, heuristic_function
)
def binary_search():
lo, hi = 1, t + 1
while lo + 1 < hi:
x = (lo + hi) // 2
if min_time(x) > t:
hi = x
else:
lo = x
return lo
print(binary_search())
@staticmethod
def d():
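        # Inclusion-exclusion over the divisors of k: s[i] ends up as the sum of x <= n with
        # gcd(x, k) == div[i], and each such x contributes x * (k // div[i]) = lcm(x, k).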
n, k = map(int, sys.stdin.readline().split())
div = sorted(NumberTheory.find_divisors(k))
l = len(div)
s = [0] * l
for i, d in enumerate(div):
s[i] = (1 + n // d) * (n // d) // 2 * d % MOD
for i in range(l - 1, -1, -1):
for j in range(i + 1, l):
if div[j] % div[i]:
continue
s[i] = (s[i] - s[j]) % MOD
print(
sum(s[i] * k // div[i] % MOD for i in range(l)) % MOD
)
class ABC021:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
s = [1 << i for i in range(5) if n >> i & 1]
print(len(s), *s, sep="\n")
@staticmethod
def b():
n, a, b, k, *p = map(int, sys.stdin.read().split())
print("YES" if len(set(p) | set([a, b])) == k + 2 else "NO")
@staticmethod
def c():
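        # Multiply the path-count vector by the adjacency matrix until b is reached; the first
        # nonzero count at b is the number of shortest paths (mod 1e9+7).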
n, a, b, m, *xy = map(int, sys.stdin.read().split())
x, y = np.array(xy).reshape(m, 2).T - 1
a -= 1
b -= 1
g = csgraph_to_dense(
csr_matrix((np.ones(m), (x, y)), (n, n), dtype=np.int8)
)
g = np.logical_or(g, g.T)
paths = np.zeros(n, dtype=np.int64).reshape(-1, 1)
paths[a, 0] = 1
while not paths[b, 0]:
paths = np.dot(g, paths) % MOD
print(paths[b, 0])
@staticmethod
def c_2():
n, a, b, m, *xy = map(int, sys.stdin.read().split())
a -= 1
b -= 1
g = GeometryTopology.Graph()
for x, y in zip(*[iter(xy)] * 2):
x -= 1
y -= 1
g.add_edge(x, y, weight=1)
g.add_edge(y, x, weight=1)
dist, paths = g.dijkstra(a, paths_cnt=True, mod=MOD)
print(paths[b])
@staticmethod
def d():
n, k = map(int, sys.stdin.read().split())
combinatorics = Combinatorics()
print(combinatorics.mod_choose(n + k - 1, k))
class ABC022:
@staticmethod
def a():
n, s, t, *a = map(int, sys.stdin.read().split())
a = np.array(a)
np.cumsum(a, out=a)
print(((s <= a) & (a <= t)).sum())
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
c = Counter(a)
print(sum(c.values()) - len(c))
@staticmethod
def c():
n, m, *uvl = map(int, sys.stdin.read().split())
u, v, l = np.array(uvl).reshape(m, 3).T
u -= 1
v -= 1
g = csgraph_to_dense(csr_matrix((l, (u, v)), (n, n)))
g += g.T
g[g == 0] = np.inf
dist0 = g[0].copy()
g[0] = 0
g[:, 0] = 0
dist = shortest_path(g, method="FW", directed=False)
u, v = np.array([*itertools.combinations(range(1, n), 2)]).T
res = (dist0[u] + dist[u, v] + dist0[v]).min()
print(-1 if res == np.inf else int(res))
@staticmethod
def d():
n, *ab = map(int, sys.stdin.read().split())
c = np.array(ab).reshape(2, n, 2)
g = c.mean(axis=1)
d = np.sqrt(((c - g[:, None, :]) ** 2).sum(axis=-1)).sum(axis=1)
print(d[1] / d[0])
class ABC023:
@staticmethod
def a():
print(sum(divmod(int(sys.stdin.readline().rstrip()), 10)))
@staticmethod
def b():
n, s = sys.stdin.read().split()
n = int(n)
t = "b"
for i in range(n // 2):
if i % 3 == 0:
t = "a" + t + "c"
elif i % 3 == 1:
t = "c" + t + "a"
else:
t = "b" + t + "b"
print(n // 2 if t == s else -1)
@staticmethod
def b_2():
n, s = sys.stdin.read().split()
n = int(n)
if n & 1 ^ 1:
print(-1)
return
a = list("abc")
i = (1 - n // 2) % 3
for c in s:
if c != a[i]:
print(-1)
return
i = (i + 1) % 3
print(n // 2)
@staticmethod
def c():
h, w, k, n, *rc = map(int, sys.stdin.read().split())
r, c = np.array(rc).reshape(n, 2).T - 1
rb = np.bincount(r, minlength=h)
cb = np.bincount(c, minlength=w)
rbb = np.bincount(rb, minlength=k + 1)
cbb = np.bincount(cb, minlength=k + 1)
tot = (rbb[: k + 1] * cbb[k::-1]).sum()
real = np.bincount(rb[r] + cb[c] - 1, minlength=k + 1)
print(tot - real[k - 1] + real[k])
@staticmethod
def d():
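        # Binary search the minimum height x; feasibility check: sort the burst deadlines
        # (x - h) // s and verify that one balloon per second is enough.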
n, *hs = map(int, sys.stdin.read().split())
h, s = np.array(hs).reshape(n, 2).T
t = np.arange(n)
def is_ok(x):
t_lim = (x - h) // s
t_lim.sort()
return np.all(t_lim >= t)
def binary_search():
lo, hi = 0, 10**14
while lo + 1 < hi:
x = (lo + hi) // 2
if is_ok(x):
hi = x
else:
lo = x
return hi
print(binary_search())
class ABC024:
@staticmethod
def a():
a, b, c, k, s, t = map(int, sys.stdin.read().split())
print(a * s + b * t - c * (s + t) * (s + t >= k))
@staticmethod
def b():
n, t, *a = map(int, sys.stdin.read().split())
a = np.array(a)
print(np.minimum(a[1:] - a[:-1], t).sum() + t)
@staticmethod
def c():
n, d, k, *lrst = map(int, sys.stdin.read().split())
lrst = np.array(lrst)
lr = lrst[: 2 * d].reshape(d, 2)
s, t = lrst[2 * d :].reshape(k, 2).T
day = np.zeros((k,), dtype=np.int32)
for i in range(d):
l, r = lr[i]
move = (l <= s) & (s <= r) & (s != t)
reach = move & (l <= t) & (t <= r)
s[move & (s < t)] = r
s[move & (s > t)] = l
s[reach] = t[reach]
day[reach] = i + 1
print(*day, sep="\n")
@staticmethod
def d():
a, b, c = map(int, sys.stdin.read().split())
p = MOD
denom = pow(a * b % p - b * c % p + c * a % p, p - 2, p)
w = (b * c - a * b) % p * denom % p
h = (b * c - a * c) % p * denom % p
print(h, w)
class ABC025:
@staticmethod
def a():
s, n = sys.stdin.read().split()
n = int(n)
i, j = divmod(n - 1, 5)
print(s[i] + s[j])
@staticmethod
def b():
n, a, b = map(int, sys.stdin.readline().split())
res = defaultdict(int)
for _ in range(n):
s, d = sys.stdin.readline().split()
d = int(d)
res[s] += min(max(d, a), b)
res = res["East"] - res["West"]
if res == 0:
ans = 0
elif res > 0:
ans = f"East {res}"
else:
ans = f"West {-res}"
print(ans)
@staticmethod
def c():
b = [0] * 6
for i in range(2):
(*row,) = map(int, sys.stdin.readline().split())
for j in range(3):
b[i * 3 + j] = row[j]
c = [0] * 8
for i in range(3):
(*row,) = map(int, sys.stdin.readline().split())
for j in range(2):
c[i * 3 + j] = row[j]
tot = sum(b) + sum(c)
@lru_cache(maxsize=None)
def f(s=tuple(0 for _ in range(9))):
if all(s):
res = 0
for i in range(6):
res += (s[i] == s[i + 3]) * b[i]
for i in range(8):
res += (s[i] == s[i + 1]) * c[i]
return res
cand = [i for i in range(9) if not s[i]]
flg = len(cand) & 1
s = list(s)
res = []
for i in cand:
s[i] = (flg ^ 1) + 1
res.append(f(tuple(s)))
s[i] = 0
return sorted(res, reverse=flg)[0]
a = f()
b = tot - a
print(a)
print(b)
class ABC026:
@staticmethod
def a():
a = int(sys.stdin.readline().rstrip())
print(a // 2 * (a - a // 2))
@staticmethod
def b():
n, *r = map(int, sys.stdin.read().split())
s = np.pi * np.array([0] + r) ** 2
s.sort()
res = s[n::-2].sum() - s[n - 1 :: -2].sum()
print(res)
@staticmethod
def c():
n, *b = map(int, sys.stdin.read().split())
g = GeometryTopology.Graph()
for i in range(1, n):
g.add_edge(b[i - 1] - 1, i, weight=1)
def f(u=0):
if not g.edges[u]:
return 1
s = [f(v) for v in g.edges[u]]
return max(s) + min(s) + 1
print(f())
@staticmethod
def d():
a, b, c = map(int, sys.stdin.readline().split())
def f(t):
return a * t + b * np.sin(c * t * np.pi) - 100
print(optimize.brenth(f, 0, 200))
class ABC027:
@staticmethod
def a():
l = [int(l) for l in sys.stdin.readline().split()]
l.sort()
print(l[2] if l[0] == l[1] else l[0])
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
m, r = divmod(sum(a), n)
if r:
print(-1)
return
population = 0
towns = 0
cnt = 0
for x in a:
population += x
towns += 1
if population / towns != m:
cnt += 1
continue
population, towns = 0, 0
print(cnt)
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip())
flg = n.bit_length() & 1 ^ 1
t = 0
x = 1
while x <= n:
t += 1
x = 2 * x + 1 if t & 1 ^ flg else 2 * x
print("Aoki" if t & 1 else "Takahashi")
class ABC032:
@staticmethod
def a():
a, b, n = map(int, sys.stdin.read().split())
l = NumberTheory.lcm(a, b)
print((n + l - 1) // l * l)
@staticmethod
def b():
s, k = sys.stdin.read().split()
k = int(k)
res = set()
for i in range(len(s) - k + 1):
res.add(s[i : i + k])
print(len(res))
@staticmethod
def c():
n, k, *s = map(int, sys.stdin.read().split())
if 0 in s:
print(n)
return
s += [inf]
res = 0
l = r = 0
tmp = 1
while r <= n:
tmp *= s[r]
while tmp > k:
res = max(res, r - l)
tmp //= s[l]
l += 1
r += 1
print(res)
class ABC033:
@staticmethod
def a():
n = set(sys.stdin.readline().rstrip())
print("SAME" if len(n) == 1 else "DIFFERENT")
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
res = dict()
for _ in range(n):
s, p = sys.stdin.readline().split()
p = int(p)
res[s] = p
tot = sum(res.values())
for s, p in res.items():
if p > tot / 2:
print(s)
return
print("atcoder")
@staticmethod
def c():
s = sys.stdin.readline().rstrip()
res = sum(not "0" in f for f in s.split("+"))
print(res)
class ABC034:
@staticmethod
def a():
x, y = map(int, sys.stdin.readline().split())
print("Better" if y > x else "Worse")
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
print(n + 1 if n & 1 else n - 1)
@staticmethod
def c():
h, w = map(int, sys.stdin.read().split())
combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)
print(combinatorics.mod_choose(h + w - 2, h - 1))
@staticmethod
def d():
n, k, *wp = map(int, sys.stdin.read().split())
w, p = np.array(wp).reshape(-1, 2).T
def f(x):
return np.sort(w * (p - x))[-k:].sum()
print(optimize.bisect(f, 0, 100))
class ABC035:
@staticmethod
def a():
w, h = map(int, sys.stdin.readline().split())
print("4:3" if 4 * h == 3 * w else "16:9")
@staticmethod
def b():
s, t = sys.stdin.read().split()
y = 0
x = 0
z = 0
for c in s:
if c == "?":
z += 1
elif c == "L":
x -= 1
elif c == "R":
x += 1
elif c == "D":
y -= 1
elif c == "U":
y += 1
d = abs(y) + abs(x)
if t == "1":
print(d + z)
else:
print(max(d - z, (d - z) & 1))
@staticmethod
def c():
n, q, *lr = map(int, sys.stdin.read().split())
l, r = np.array(lr).reshape(q, 2).T
res = np.zeros(n + 1, dtype=int)
np.add.at(res, l - 1, 1)
np.subtract.at(res, r, 1)
np.cumsum(res, out=res)
res = res & 1
print("".join(map(str, res[:-1])))
@staticmethod
def d():
n, m, t = map(int, sys.stdin.readline().split())
point = np.array(sys.stdin.readline().split(), dtype=int)
a, b, c = (
np.array(sys.stdin.read().split(), dtype=np.int64)
.reshape(m, 3)
.T
)
a -= 1
b -= 1
d_1 = shortest_path(
csr_matrix((c, (a, b)), (n, n)),
method="D",
directed=True,
indices=0,
)
d_2 = shortest_path(
csr_matrix((c, (b, a)), (n, n)),
method="D",
directed=True,
indices=0,
)
print(int(np.amax((t - (d_1 + d_2)) * point)))
class ABC036:
@staticmethod
def a():
a, b = map(int, sys.stdin.readline().split())
print((b + a - 1) // a)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
n = int(n)
for j in range(n):
row = ""
for i in range(n - 1, -1, -1):
row += s[i][j]
print(row)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
b = [None] * n
prev = None
j = -1
for i, x in sorted(enumerate(a), key=lambda x: x[1]):
if x != prev:
j += 1
b[i] = j
prev = x
print(*b, sep="\n")
@staticmethod
def d():
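        # Tree DP: black[u] = product of the children's white counts, white[u] = product of
        # (black + white) over the children, everything mod 1e9+7.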
n, *ab = map(int, sys.stdin.read().split())
edges = [[] for _ in range(n)]
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
edges[a].append(b)
edges[b].append(a)
parent = [None] * n
def count(u):
black, white = 1, 1
for v in edges[u]:
if v == parent[u]:
continue
parent[v] = u
b, w = count(v)
black *= w
black %= MOD
white *= (b + w) % MOD
white %= MOD
return black, white
print(sum(count(0)) % MOD)
class ABC037:
@staticmethod
def a():
a, b, c = map(int, sys.stdin.readline().split())
print(c // min(a, b))
@staticmethod
def b():
n, q, *lrt = map(int, sys.stdin.read().split())
a = np.zeros(n, dtype=int)
for l, r, t in zip(*[iter(lrt)] * 3):
a[l - 1 : r] = t
print(*a, sep="\n")
@staticmethod
def c():
n, k, *a = map(int, sys.stdin.read().split())
a = np.array([0] + a)
np.cumsum(a, out=a)
s = (a[k:] - a[:-k]).sum()
print(s)
@staticmethod
def d():
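        # Memoised DFS: paths(i, j) counts paths starting at (i, j) that keep moving to a
        # strictly smaller neighbour; the answer is the sum over all cells mod 1e9+7.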
h, w = map(int, sys.stdin.readline().split())
a = [
[int(x) for x in sys.stdin.readline().split()]
for _ in range(h)
]
dyx = [(-1, 0), (0, -1), (1, 0), (0, 1)]
path = [[None] * w for _ in range(h)]
def paths(i, j):
if path[i][j]:
return path[i][j]
val = a[i][j]
cnt = 1
for dy, dx in dyx:
y = i + dy
x = j + dx
if 0 <= y < h and 0 <= x < w and a[y][x] < val:
cnt += paths(y, x)
cnt %= MOD
path[i][j] = cnt
return cnt
tot = 0
for i in range(h):
for j in range(w):
tot += paths(i, j)
tot %= MOD
print(tot)
class ABC038:
@staticmethod
def a():
s = sys.stdin.readline().rstrip()
print("YES" if s[-1] == "T" else "NO")
@staticmethod
def b():
a, b, c, d = map(int, sys.stdin.read().split())
print("YES" if a == c or b == c or a == d or b == d else "NO")
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a += [-1]
cnt = n
tmp = 1
for i in range(n):
if a[i + 1] > a[i]:
tmp += 1
else:
cnt += tmp * (tmp - 1) // 2
tmp = 1
print(cnt)
@staticmethod
def d():
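        # Order the boxes by width ascending (height descending for equal widths); the answer is
        # the longest strictly increasing subsequence of heights (patience sorting with bisect).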
n, *wh = map(int, sys.stdin.read().split())
wh = sorted(zip(*[iter(wh)] * 2), key=lambda x: (-x[0], x[1]))
w = [x[1] for x in wh][::-1]
res = [inf] * n
for x in w:
res[bi_l(res, x)] = x
print(bi_l(res, inf))
class ABC039:
@staticmethod
def a():
a, b, c = map(int, sys.stdin.readline().split())
print((a * b + b * c + c * a) * 2)
@staticmethod
def b():
x = int(sys.stdin.readline().rstrip())
for n in range(1, int(x**0.5) + 1):
if pow(n, 4) == x:
print(n)
return
@staticmethod
def c():
board = "WBWBWWBWBWBW" * 3
convert = "Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si".split(", ")
s = sys.stdin.readline().rstrip()
print(convert[board.index(s)])
@staticmethod
def d():
h, w = map(int, sys.stdin.readline().split())
s = sys.stdin.read().split()
dyx = list(itertools.product((-1, 0, 1), repeat=2))
black_certain = set()
black_before = set()
for i in range(h):
for j in range(w):
black_cand = set()
for dy, dx in dyx:
y = i + dy
x = j + dx
if y < 0 or y >= h or x < 0 or x >= w:
continue
if s[y][x] == ".":
break
black_cand.add((y, x))
else:
black_before.add((i, j))
black_certain |= black_cand
for i in range(h):
for j in range(w):
if s[i][j] == "#" and not (i, j) in black_certain:
print("impossible")
return
print("possible")
for i in range(h):
row = ""
for j in range(w):
row += "#" if (i, j) in black_before else "."
print("".join(row))
class ABC040:
@staticmethod
def a():
n, x = map(int, sys.stdin.readline().split())
print(min(x - 1, n - x))
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
res = inf
for i in range(1, int(n**0.5) + 1):
res = min(res, n // i - i + n % i)
print(res)
@staticmethod
def c():
n, *h = map(int, sys.stdin.read().split())
h = [h[0]] + h
cost = [None] * (n + 1)
cost[0] = cost[1] = 0
for i in range(2, n + 1):
cost[i] = min(
cost[i - 2] + abs(h[i] - h[i - 2]),
cost[i - 1] + abs(h[i] - h[i - 1]),
)
print(cost[n])
@staticmethod
def d():
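        # Offline union-find: push edges (key 2y) and queries (key 2y + 1) into one heap and pop
        # in decreasing year order, so strictly newer edges are united before a query reads its
        # component size.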
n, m = map(int, sys.stdin.readline().split())
uf = GeometryTopology.UnionFind(n=n)
queue = []
for _ in range(m):
a, b, y = map(int, sys.stdin.readline().split())
heappush(queue, (-(2 * y), a - 1, b - 1))
q = int(sys.stdin.readline().rstrip())
for i in range(q):
v, y = map(int, sys.stdin.readline().split())
heappush(queue, (-(2 * y + 1), v - 1, i))
res = [None] * q
while queue:
y, i, j = heappop(queue)
if y & 1:
res[j] = uf.size[uf.find_root(i)]
else:
uf.unite(i, j)
print(*res, sep="\n")
class ABC041:
@staticmethod
def a():
s, i = sys.stdin.read().split()
i = int(i)
print(s[i - 1])
@staticmethod
def b():
a, b, c = map(int, sys.stdin.readline().split())
ans = a * b % MOD * c % MOD
print(ans)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
for i, h in sorted(enumerate(a), key=lambda x: -x[1]):
print(i + 1)
@staticmethod
def d():
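        # Bitmask DP over vertex subsets counting linear extensions: repeatedly remove a vertex
        # with no remaining outgoing edges and memoise the count per subset.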
n, m, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*[iter(xy)] * 2)
edges = [0] * n
for x, y in xy:
x -= 1
y -= 1
edges[x] |= 1 << y
comb = [None] * (1 << n)
comb[0] = 1
def count(edges, bit):
if comb[bit] is not None:
return comb[bit]
comb[bit] = 0
for i in range(n):
if (bit >> i) & 1 and not edges[i]:
nxt_bit = bit & ~(1 << i)
nxt_edges = edges.copy()
for j in range(n):
nxt_edges[j] &= ~(1 << i)
cnt = count(nxt_edges, nxt_bit)
comb[bit] += cnt
return comb[bit]
print(count(edges, (1 << n) - 1))
class ABC042:
@staticmethod
def a():
a = [int(x) for x in sys.stdin.readline().split()]
c = Counter(a)
print("YES" if c[5] == 2 and c[7] == 1 else "NO")
@staticmethod
def b():
n, l, *s = sys.stdin.read().split()
print("".join(sorted(s)))
@staticmethod
def c():
n, k, *d = sys.stdin.read().split()
l = len(n)
ok = sorted(set(string.digits) - set(d))
cand = [
int("".join(p)) for p in itertools.product(ok, repeat=l)
] + [int(min(x for x in ok if x > "0") + min(ok) * l)]
print(cand[bi_l(cand, int(n))])
@staticmethod
def d():
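        # Complementary counting: all lattice paths C(h+w-2, h-1) minus the paths that enter the
        # forbidden a x b corner, split by the cell where they first cross its boundary.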
h, w, a, b = map(int, sys.stdin.read().split())
combinatorics = Combinatorics(n=2 * 10**5, mod=MOD)
tot = combinatorics.mod_choose(h + w - 2, h - 1)
i = np.arange(h - a, h)
ng = np.sum(
combinatorics.mod_choose(i + b - 1, i)
* combinatorics.mod_choose(h - i + w - b - 2, h - 1 - i)
% MOD
)
tot -= ng
tot %= MOD
print(tot)
class ABC043:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print((1 + n) * n // 2)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
t = ""
for c in s:
if c == "B":
t = t[:-1]
else:
t += c
print(t)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
x = np.around(a.sum() / n).astype(int)
print(np.sum((a - x) ** 2))
@staticmethod
def d():
s = sys.stdin.readline().rstrip()
n = len(s)
for i in range(n - 1):
if s[i] == s[i + 1]:
print(i + 1, i + 2)
return
for i in range(n - 2):
if s[i] == s[i + 2]:
print(i + 1, i + 3)
return
print(-1, -1)
class ABC170:
@staticmethod
def a():
x = [int(x) for x in sys.stdin.readline().split()]
for i in range(5):
if x[i] != i + 1:
print(i + 1)
break
@staticmethod
def b():
x, y = map(int, sys.stdin.readline().split())
print("Yes" if 2 * x <= y <= 4 * x and y % 2 == 0 else "No")
@staticmethod
def c():
x, n, *p = map(int, sys.stdin.read().split())
a = list(set(range(102)) - set(p))
a = [(abs(y - x), y) for y in a]
print(sorted(a)[0][1])
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
cand = set(a)
cnt = 0
for x, c in sorted(Counter(a).items()):
cnt += c == 1 and x in cand
cand -= set(range(x * 2, 10**6 + 1, x))
print(cnt)
@staticmethod
def e():
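        # Each kindergarten keeps a max-heap of rates; a global min-heap of the current leaders
        # answers "lowest of the highest rates", skipping stale entries lazily.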
n, q = map(int, sys.stdin.readline().split())
queue = []
m = 2 * 10**5
infants = [[] for _ in range(m)]
highest_rate = [None] * m
where = [None] * n
rate = [None] * n
def entry(i, k):
where[i] = k
while infants[k]:
r, j = heappop(infants[k])
if where[j] != k or j == i:
continue
if rate[i] >= -r:
highest_rate[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(infants[k], (r, j))
break
else:
highest_rate[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(infants[k], (-rate[i], i))
def transfer(i, k):
now = where[i]
while infants[now]:
r, j = heappop(infants[now])
if where[j] != now or j == i:
continue
if highest_rate[now] != -r:
highest_rate[now] = -r
heappush(queue, (-r, now, j))
heappush(infants[now], (r, j))
break
else:
highest_rate[now] = None
entry(i, k)
def inquire():
while True:
r, k, i = heappop(queue)
if where[i] != k or r != highest_rate[k]:
continue
heappush(queue, (r, k, i))
return r
for i in range(n):
a, b = map(int, sys.stdin.readline().split())
rate[i] = a
entry(i, b - 1)
for _ in range(q):
c, d = map(int, sys.stdin.readline().split())
transfer(c - 1, d - 1)
print(inquire())
class ABC171:
@staticmethod
def a():
c = sys.stdin.readline().rstrip()
print("A" if c < "a" else "a")
@staticmethod
def b():
n, k, *p = map(int, sys.stdin.read().split())
print(sum(sorted(p)[:k]))
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip())
n -= 1
l = 1
while True:
if n < pow(26, l):
break
n -= pow(26, l)
l += 1
res = "".join(
[chr(ord("a") + d) for d in NumberTheory.base_convert(n, 26)][
::-1
]
)
res = "a" * (l - len(res)) + res
print(res)
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
s = sum(a)
cnt = Counter(a)
q = int(sys.stdin.readline().rstrip())
for _ in range(q):
b, c = map(int, sys.stdin.readline().split())
s += (c - b) * cnt[b]
print(s)
cnt[c] += cnt[b]
cnt[b] = 0
@staticmethod
def e():
n, *a = map(int, sys.stdin.read().split())
s = 0
for x in a:
s ^= x
b = map(lambda x: x ^ s, a)
print(*b, sep=" ")
class ABC172:
@staticmethod
def a():
a = int(sys.stdin.readline().rstrip())
print(a * (1 + a + a**2))
@staticmethod
def b():
s, t = sys.stdin.read().split()
print(sum(s[i] != t[i] for i in range(len(s))))
@staticmethod
def c():
n, m, k = map(int, sys.stdin.readline().split())
a = [0] + [int(x) for x in sys.stdin.readline().split()]
b = [int(x) for x in sys.stdin.readline().split()]
(*sa,) = itertools.accumulate(a)
(*sb,) = itertools.accumulate(b)
res = 0
for i in range(n + 1):
r = k - sa[i]
if r < 0:
break
res = max(res, i + bi_r(sb, r))
print(res)
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
f = np.zeros(n + 1, dtype=np.int64)
for i in range(1, n + 1):
f[i::i] += 1
print((np.arange(1, n + 1) * f[1:]).sum())
class ABC173:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
charge = (n + 999) // 1000 * 1000 - n
print(charge)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
c = Counter(s)
for v in "AC, WA, TLE, RE".split(", "):
print(f"{v} x {c[v]}")
@staticmethod
def c():
h, w, k = map(int, sys.stdin.readline().split())
c = [sys.stdin.readline().rstrip() for _ in range(h)]
tot = 0
for i in range(1 << h):
for j in range(1 << w):
cnt = 0
for y in range(h):
for x in range(w):
if i >> y & 1 or j >> x & 1:
continue
cnt += c[y][x] == "#"
tot += cnt == k
print(tot)
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
a.sort(reverse=True)
res = (
a[0]
+ sum(a[1 : 1 + (n - 2) // 2]) * 2
+ a[1 + (n - 2) // 2] * (n & 1)
)
print(res)
@staticmethod
def e():
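        # Maximise the product of k values mod 1e9+7: pair up negatives (and positives) greedily
        # when a non-negative product is possible; otherwise answer 0 if available, or the
        # negated product of the k smallest absolute values.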
MOD = 10**9 + 7
n, k, *a = map(int, sys.stdin.read().split())
minus = [x for x in a if x < 0]
plus = [x for x in a if x > 0]
if len(plus) + len(minus) // 2 * 2 >= k:
(*minus,) = map(abs, minus)
minus.sort(reverse=True)
plus.sort(reverse=True)
cand = []
if len(minus) & 1:
minus = minus[:-1]
for i in range(0, len(minus) - 1, 2):
cand.append(minus[i] * minus[i + 1] % MOD)
if k & 1:
res = plus[0]
plus = plus[1:]
else:
res = 1
if len(plus) & 1:
plus = plus[:-1]
for i in range(0, len(plus) - 1, 2):
cand.append(plus[i] * plus[i + 1] % MOD)
cand.sort(reverse=True)
for x in cand[: k // 2]:
res *= x
res %= MOD
print(res)
elif 0 in a:
print(0)
else:
cand = sorted(map(abs, a))
res = 1
for i in range(k):
res *= cand[i]
res %= MOD
res = MOD - res
print(res)
pass
class ABC174:
@staticmethod
def a():
print("Yes" if int(sys.stdin.readline().rstrip()) >= 30 else "No")
class ACL001:
@staticmethod
def a():
n, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*[iter(xy)] * 2)
print(xy)
pass
class MSolutions2020:
@staticmethod
def a():
x = int(sys.stdin.readline().rstrip())
x -= 400
print(8 - x // 200)
@staticmethod
def b():
r, g, b, k = map(int, sys.stdin.read().split())
while k and g <= r:
g *= 2
k -= 1
while k and b <= g:
b *= 2
k -= 1
print("Yes" if r < g < b else "No")
@staticmethod
def c():
n, k, *a = map(int, sys.stdin.read().split())
for i in range(k, n):
print("Yes" if a[i] > a[i - k] else "No")
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
a += [-1]
m = 1000
s = 0
for i in range(n):
if a[i + 1] == a[i]:
continue
elif a[i + 1] > a[i]:
cnt = m // a[i]
m -= a[i] * cnt
s += cnt
else:
m += a[i] * s
s = 0
print(m)
class Codeforces:
pass
class ProjectEuler:
@staticmethod
def p1():
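        # Closed form: the sum of multiples of x up to n is x * m * (m + 1) / 2 with m = n // x;
        # combine 3 and 5 by inclusion-exclusion on 15.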
def f(n, x):
return (x + n // x * x) * (n // x) // 2
n = 1000
ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)
print(ans)
@staticmethod
def p2():
fib = [1, 2]
while fib[-1] < 4 * 10**6:
fib.append(fib[-1] + fib[-2])
print(sum(fib[1:-1:3]))
@staticmethod
def p3():
number_theory = NumberTheory()
res = number_theory.prime_factorize(600851475143)
print(max(res.keys()))
@staticmethod
def p4():
def is_palindrome(n):
n = str(n)
return n == n[::-1]
cand = []
for a in range(100, 1000):
for b in range(a, 1000):
n = a * b
if is_palindrome(n):
cand.append(n)
print(max(cand))
@staticmethod
def p5():
number_theory = NumberTheory()
res = defaultdict(int)
for i in range(1, 21):
for p, c in number_theory.prime_factorize(i).items():
res[p] = max(res[p], c)
ans = 1
for p, c in res.items():
ans *= pow(p, c)
print(ans)
@staticmethod
def p6():
a = np.arange(101)
b = np.cumsum(a**2)
a = a.cumsum()
print(a[100] ** 2 - b[100])
@staticmethod
def p7():
number_theory = NumberTheory()
print(sorted(number_theory.prime_numbers)[10000])
@staticmethod
def p8():
n = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
n = [int(d) for d in list(n)]
res = 0
for i in range(988):
x = 1
for j in range(13):
x *= n[i + j]
res = max(res, x)
print(res)
@staticmethod
def p9():
for a in range(1, 997):
for b in range(a, 998 - a):
c = 1000 - a - b
if a**2 + b**2 == c**2:
print(a * b * c)
return
@staticmethod
def p10():
number_theory = NumberTheory(2 * 10**6 - 1)
print(sum(number_theory.prime_numbers))
@staticmethod
def p11():
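        # NOTE: unfinished -- currently this only echoes the input grid.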
grid = "08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"
print(grid)
pass
class Yukicoder:
pass
if __name__ == "__main__":
AtCoder.ABC009.d()
| true
| true
|
7906f6703118c8c80c7b717845aff984c9f1b225
| 19,073
|
py
|
Python
|
owslib/coverage/wcs100.py
|
ferreteleco/OWSLib
|
ec4ac8d8006ebf8049319d282314b0e1e6263472
|
[
"BSD-3-Clause"
] | null | null | null |
owslib/coverage/wcs100.py
|
ferreteleco/OWSLib
|
ec4ac8d8006ebf8049319d282314b0e1e6263472
|
[
"BSD-3-Clause"
] | null | null | null |
owslib/coverage/wcs100.py
|
ferreteleco/OWSLib
|
ec4ac8d8006ebf8049319d282314b0e1e6263472
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <d.lowe@rl.ac.uk>
#
# Contact email: d.lowe@rl.ac.uk
# =============================================================================
from owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from urllib.parse import urlencode
from owslib.util import openURL, testXMLValue
from owslib.etree import etree
from owslib.crs import Crs
import os
import errno
import logging
from owslib.util import log, makeString
# function to save writing out WCS namespace in full each time
def ns(tag):
return '{http://www.opengis.net/wcs}' + tag
class WebCoverageService_1_0_0(WCSBase):
"""Abstraction for OGC Web Coverage Service (WCS), version 1.0.0
Implements IWebCoverageService.
"""
def __getitem__(self, name):
''' check contents dictionary to allow dict like access to service layers'''
if name in list(self.__getattribute__('contents').keys()):
return self.__getattribute__('contents')[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, xml, cookies, auth=None, timeout=30):
super(WebCoverageService_1_0_0, self).__init__(auth)
self.version = '1.0.0'
self.url = url
self.cookies = cookies
self.timeout = timeout
# initialize from saved capability document or access the server
reader = WCSCapabilitiesReader(self.version, self.cookies, self.auth)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url, self.timeout)
# check for exceptions
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
self.updateSequence = self._capabilities.attrib.get('updateSequence')
# serviceIdentification metadata
subelem = self._capabilities.find(ns('Service'))
self.identification = ServiceIdentification(subelem)
# serviceProvider metadata
subelem = self._capabilities.find(ns('Service/') + ns('responsibleParty'))
self.provider = ServiceProvider(subelem)
# serviceOperations metadata
self.operations = []
for elem in self._capabilities.find(ns('Capability/') + ns('Request'))[:]:
self.operations.append(OperationMetadata(elem))
# serviceContents metadata
self.contents = {}
for elem in self._capabilities.findall(ns('ContentMetadata/') + ns('CoverageOfferingBrief')):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
# Some WCS servers (wrongly) advertise 'Content' OfferingBrief instead.
if self.contents == {}:
for elem in self._capabilities.findall(ns('ContentMetadata/') + ns('ContentOfferingBrief')):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
# exceptions
self.exceptions = [f.text for f in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def getCoverage(self, identifier=None, bbox=None, time=None, format=None, crs=None, width=None, height=None,
resx=None, resy=None, resz=None, parameter=None, method='Get', timeout=30, **kwargs):
"""Request and return a coverage from the WCS as a file-like object
note: additional **kwargs helps with multi-version implementation
core keyword arguments should be supported cross version
example:
cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),
format='cf-netcdf')
is equivalent to:
http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIME=2792-06-01T00:00:00.0&FORMAT=cf-netcdf
"""
if log.isEnabledFor(logging.DEBUG):
msg = 'WCS 1.0.0 DEBUG: Parameters passed to GetCoverage: identifier={}, bbox={}, time={}, format={}, crs={}, width={}, height={}, resx={}, resy={}, resz={}, parameter={}, method={}, other_arguments={}' # noqa
log.debug(msg.format(
identifier, bbox, time, format, crs, width, height, resx, resy, resz, parameter, method, str(kwargs)))
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetCoverage').methods
if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
log.debug('WCS 1.0.0 DEBUG: base url of server: %s' % base_url)
# process kwargs
request = {'version': self.version, 'request': 'GetCoverage', 'service': 'WCS'}
assert len(identifier) > 0
request['Coverage'] = identifier
# request['identifier'] = ','.join(identifier)
if bbox:
request['BBox'] = ','.join([makeString(x) for x in bbox])
else:
request['BBox'] = None
if time:
request['time'] = ','.join(time)
if crs:
request['crs'] = crs
request['format'] = format
if width:
request['width'] = width
if height:
request['height'] = height
if resx:
request['resx'] = resx
if resy:
request['resy'] = resy
if resz:
request['resz'] = resz
# anything else e.g. vendor specific parameters must go through kwargs
if kwargs:
for kw in kwargs:
request[kw] = kwargs[kw]
# encode and request
data = urlencode(request)
log.debug('WCS 1.0.0 DEBUG: Second part of URL: %s' % data)
u = openURL(base_url, data, method, self.cookies, auth=self.auth, timeout=timeout)
return u
def getOperationByName(self, name):
"""Return a named operation item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class OperationMetadata(object):
"""Abstraction for WCS metadata.
Implements IMetadata.
"""
def __init__(self, elem):
"""."""
self.name = elem.tag.split('}')[1]
# self.formatOptions = [f.text for f in elem.findall('{http://www.opengis.net/wcs/1.1/ows}Parameter/{http://www.opengis.net/wcs/1.1/ows}AllowedValues/{http://www.opengis.net/wcs/1.1/ows}Value')] # noqa
self.methods = []
for resource in elem.findall(ns('DCPType/') + ns('HTTP/') + ns('Get/') + ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type': 'Get', 'url': url})
for resource in elem.findall(ns('DCPType/') + ns('HTTP/') + ns('Post/') + ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type': 'Post', 'url': url})
class ServiceIdentification(object):
""" Abstraction for ServiceIdentification metadata """
def __init__(self, elem):
# properties
self.type = 'OGC:WCS'
self.version = '1.0.0'
self.service = testXMLValue(elem.find(ns('name')))
self.abstract = testXMLValue(elem.find(ns('description')))
self.title = testXMLValue(elem.find(ns('label')))
self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
# note: differs from 'rights' in interface
self.fees = elem.find(ns('fees')).text
self.accessConstraints = elem.find(ns('accessConstraints')).text
class ServiceProvider(object):
""" Abstraction for WCS ResponsibleParty
Implements IServiceProvider"""
def __init__(self, elem):
# it's not uncommon for the service provider info to be missing
# so handle case where None is passed in
if elem is None:
self.name = None
self.url = None
self.contact = None
else:
self.name = testXMLValue(elem.find(ns('organisationName')))
self.url = self.name # there is no definitive place for url WCS, repeat organisationName
self.contact = ContactMetadata(elem)
class ContactMetadata(object):
''' implements IContactMetadata'''
def __init__(self, elem):
try:
self.name = elem.find(ns('individualName')).text
except AttributeError:
self.name = None
try:
self.organization = elem.find(ns('organisationName')).text
except AttributeError:
self.organization = None
try:
self.address = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('deliveryPoint')).text
except AttributeError:
self.address = None
try:
self.city = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('city')).text
except AttributeError:
self.city = None
try:
self.region = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('administrativeArea')).text
except AttributeError:
self.region = None
try:
self.postcode = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('postalCode')).text
except AttributeError:
self.postcode = None
try:
self.country = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('country')).text
except AttributeError:
self.country = None
try:
self.email = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('electronicMailAddress')).text
except AttributeError:
self.email = None
class ContentMetadata(object):
"""
Implements IContentMetadata
"""
def __init__(self, elem, service):
"""Initialize. service is required so that describeCoverage requests may be made"""
# TODO - examine the parent for bounding box info.
# self._parent=parent
self._elem = elem
self._service = service
self.id = elem.find(ns('name')).text
self.title = testXMLValue(elem.find(ns('label')))
self.abstract = testXMLValue(elem.find(ns('description')))
self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
self.boundingBox = None # needed for iContentMetadata harmonisation
self.boundingBoxWGS84 = None
b = elem.find(ns('lonLatEnvelope'))
if b is not None:
gmlpositions = b.findall('{http://www.opengis.net/gml}pos')
lc = gmlpositions[0].text
uc = gmlpositions[1].text
self.boundingBoxWGS84 = (
float(lc.split()[0]), float(lc.split()[1]),
float(uc.split()[0]), float(uc.split()[1]),
)
# others not used but needed for iContentMetadata harmonisation
self.styles = None
self.crsOptions = None
self.defaulttimeposition = None
# grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
def _getGrid(self):
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}RectifiedGrid') # noqa
if gridelem is not None:
grid = RectifiedGrid(gridelem)
else:
gridelem = self.descCov.find(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}Grid') # noqa
grid = Grid(gridelem)
return grid
grid = property(_getGrid, None)
# timelimits are the start/end times, timepositions are all timepoints.
# WCS servers can declare one or both or neither of these.
def _getTimeLimits(self):
timepoints, timelimits = [], []
b = self._elem.find(ns('lonLatEnvelope'))
if b is not None:
timepoints = b.findall('{http://www.opengis.net/gml}timePosition')
else:
# have to make a describeCoverage request...
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('temporalDomain/') + '{http://www.opengis.net/gml}timePosition'): # noqa
timepoints.append(pos)
if timepoints:
timelimits = [timepoints[0].text, timepoints[1].text]
return timelimits
timelimits = property(_getTimeLimits, None)
def _getTimePositions(self):
timepositions = []
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('temporalDomain/') + '{http://www.opengis.net/gml}timePosition'): # noqa
timepositions.append(pos.text)
return timepositions
timepositions = property(_getTimePositions, None)
def _getOtherBoundingBoxes(self):
''' incomplete, should return other bounding boxes not in WGS84
#TODO: find any other bounding boxes. Need to check for gml:EnvelopeWithTimePeriod.'''
bboxes = []
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for envelope in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}Envelope'): # noqa
bbox = {}
bbox['nativeSrs'] = envelope.attrib['srsName']
gmlpositions = envelope.findall('{http://www.opengis.net/gml}pos')
lc = gmlpositions[0].text.split()
uc = gmlpositions[1].text.split()
bbox['bbox'] = (
float(lc[0]), float(lc[1]),
float(uc[0]), float(uc[1])
)
bboxes.append(bbox)
return bboxes
boundingboxes = property(_getOtherBoundingBoxes, None)
def _getSupportedCRSProperty(self):
# gets supported crs info
crss = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('responseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('requestResponseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('nativeCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
return crss
supportedCRS = property(_getSupportedCRSProperty, None)
def _getSupportedFormatsProperty(self):
# gets supported formats info
frmts = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedFormats/') + ns('formats')):
frmts.append(elem.text)
return frmts
supportedFormats = property(_getSupportedFormatsProperty, None)
def _getAxisDescriptionsProperty(self):
# gets any axis descriptions contained in the rangeset (requires a DescribeCoverage call to server).
axisDescs = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('rangeSet/') + ns('RangeSet/') + ns('axisDescription/') + ns('AxisDescription')): # noqa
axisDescs.append(AxisDescription(elem)) # create a 'AxisDescription' object.
return axisDescs
axisDescriptions = property(_getAxisDescriptionsProperty, None)
# Adding classes to represent gml:grid and gml:rectifiedgrid. One of these is used for the cvg.grid property
# (where cvg is a member of the contents dictionary)
# There is no simple way to convert the offset values in a rectifiedgrid grid to real values without CRS understanding,
# therefore this is beyond the current scope of owslib, so the representation here is purely to provide access
# to the information in the GML.
class Grid(object):
''' Simple grid class to provide axis and value information for a gml grid '''
def __init__(self, grid):
self.axislabels = []
self.dimension = None
self.lowlimits = []
self.highlimits = []
if grid is not None:
self.dimension = int(grid.get('dimension'))
self.lowlimits = grid.find(
'{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}low').text.split(' ') # noqa
self.highlimits = grid.find(
'{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}high').text.split(' ') # noqa
for axis in grid.findall('{http://www.opengis.net/gml}axisName'):
self.axislabels.append(axis.text)
class RectifiedGrid(Grid):
''' RectifiedGrid class, extends Grid with additional offset vector information '''
def __init__(self, rectifiedgrid):
super(RectifiedGrid, self).__init__(rectifiedgrid)
self.origin = rectifiedgrid.find(
'{http://www.opengis.net/gml}origin/{http://www.opengis.net/gml}pos').text.split()
self.offsetvectors = []
for offset in rectifiedgrid.findall('{http://www.opengis.net/gml}offsetVector'):
self.offsetvectors.append(offset.text.split())
class AxisDescription(object):
''' Class to represent the AxisDescription element optionally found as part of the RangeSet and used to
define ordinates of additional dimensions such as wavelength bands or pressure levels'''
def __init__(self, axisdescElem):
self.name = self.label = None
self.values = []
for elem in axisdescElem.getchildren():
if elem.tag == ns('name'):
self.name = elem.text
elif elem.tag == ns('label'):
self.label = elem.text
elif elem.tag == ns('values'):
for child in elem.getchildren():
self.values.append(child.text)
| 43.446469
| 222
| 0.604677
|
from owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from urllib.parse import urlencode
from owslib.util import openURL, testXMLValue
from owslib.etree import etree
from owslib.crs import Crs
import os
import errno
import logging
from owslib.util import log, makeString
def ns(tag):
return '{http://www.opengis.net/wcs}' + tag
class WebCoverageService_1_0_0(WCSBase):
def __getitem__(self, name):
if name in list(self.__getattribute__('contents').keys()):
return self.__getattribute__('contents')[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, xml, cookies, auth=None, timeout=30):
super(WebCoverageService_1_0_0, self).__init__(auth)
self.version = '1.0.0'
self.url = url
self.cookies = cookies
self.timeout = timeout
reader = WCSCapabilitiesReader(self.version, self.cookies, self.auth)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url, self.timeout)
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
self.updateSequence = self._capabilities.attrib.get('updateSequence')
subelem = self._capabilities.find(ns('Service'))
self.identification = ServiceIdentification(subelem)
subelem = self._capabilities.find(ns('Service/') + ns('responsibleParty'))
self.provider = ServiceProvider(subelem)
self.operations = []
for elem in self._capabilities.find(ns('Capability/') + ns('Request'))[:]:
self.operations.append(OperationMetadata(elem))
self.contents = {}
for elem in self._capabilities.findall(ns('ContentMetadata/') + ns('CoverageOfferingBrief')):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
if self.contents == {}:
for elem in self._capabilities.findall(ns('ContentMetadata/') + ns('ContentOfferingBrief')):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
self.exceptions = [f.text for f in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def getCoverage(self, identifier=None, bbox=None, time=None, format=None, crs=None, width=None, height=None,
resx=None, resy=None, resz=None, parameter=None, method='Get', timeout=30, **kwargs):
if log.isEnabledFor(logging.DEBUG):
msg = 'WCS 1.0.0 DEBUG: Parameters passed to GetCoverage: identifier={}, bbox={}, time={}, format={}, crs={}, width={}, height={}, resx={}, resy={}, resz={}, parameter={}, method={}, other_arguments={}'
log.debug(msg.format(
identifier, bbox, time, format, crs, width, height, resx, resy, resz, parameter, method, str(kwargs)))
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetCoverage').methods
if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
log.debug('WCS 1.0.0 DEBUG: base url of server: %s' % base_url)
request = {'version': self.version, 'request': 'GetCoverage', 'service': 'WCS'}
assert len(identifier) > 0
request['Coverage'] = identifier
if bbox:
request['BBox'] = ','.join([makeString(x) for x in bbox])
else:
request['BBox'] = None
if time:
request['time'] = ','.join(time)
if crs:
request['crs'] = crs
request['format'] = format
if width:
request['width'] = width
if height:
request['height'] = height
if resx:
request['resx'] = resx
if resy:
request['resy'] = resy
if resz:
request['resz'] = resz
if kwargs:
for kw in kwargs:
request[kw] = kwargs[kw]
data = urlencode(request)
log.debug('WCS 1.0.0 DEBUG: Second part of URL: %s' % data)
u = openURL(base_url, data, method, self.cookies, auth=self.auth, timeout=timeout)
return u
def getOperationByName(self, name):
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class OperationMetadata(object):
def __init__(self, elem):
self.name = elem.tag.split('}')[1]
self.methods = []
for resource in elem.findall(ns('DCPType/') + ns('HTTP/') + ns('Get/') + ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type': 'Get', 'url': url})
for resource in elem.findall(ns('DCPType/') + ns('HTTP/') + ns('Post/') + ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type': 'Post', 'url': url})
class ServiceIdentification(object):
def __init__(self, elem):
self.type = 'OGC:WCS'
self.version = '1.0.0'
self.service = testXMLValue(elem.find(ns('name')))
self.abstract = testXMLValue(elem.find(ns('description')))
self.title = testXMLValue(elem.find(ns('label')))
self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
self.fees = elem.find(ns('fees')).text
self.accessConstraints = elem.find(ns('accessConstraints')).text
class ServiceProvider(object):
def __init__(self, elem):
# so handle case where None is passed in
if elem is None:
self.name = None
self.url = None
self.contact = None
else:
self.name = testXMLValue(elem.find(ns('organisationName')))
self.url = self.name # there is no definitive place for url WCS, repeat organisationName
self.contact = ContactMetadata(elem)
class ContactMetadata(object):
def __init__(self, elem):
try:
self.name = elem.find(ns('individualName')).text
except AttributeError:
self.name = None
try:
self.organization = elem.find(ns('organisationName')).text
except AttributeError:
self.organization = None
try:
self.address = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('deliveryPoint')).text
except AttributeError:
self.address = None
try:
self.city = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('city')).text
except AttributeError:
self.city = None
try:
self.region = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('administrativeArea')).text
except AttributeError:
self.region = None
try:
self.postcode = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('postalCode')).text
except AttributeError:
self.postcode = None
try:
self.country = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('country')).text
except AttributeError:
self.country = None
try:
self.email = elem.find(ns('contactInfo') + '/' + ns('address') + '/' + ns('electronicMailAddress')).text
except AttributeError:
self.email = None
class ContentMetadata(object):
def __init__(self, elem, service):
# TODO - examine the parent for bounding box info.
# self._parent=parent
self._elem = elem
self._service = service
self.id = elem.find(ns('name')).text
self.title = testXMLValue(elem.find(ns('label')))
self.abstract = testXMLValue(elem.find(ns('description')))
self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
self.boundingBox = None # needed for iContentMetadata harmonisation
self.boundingBoxWGS84 = None
b = elem.find(ns('lonLatEnvelope'))
if b is not None:
gmlpositions = b.findall('{http://www.opengis.net/gml}pos')
lc = gmlpositions[0].text
uc = gmlpositions[1].text
self.boundingBoxWGS84 = (
float(lc.split()[0]), float(lc.split()[1]),
float(uc.split()[0]), float(uc.split()[1]),
)
# others not used but needed for iContentMetadata harmonisation
self.styles = None
self.crsOptions = None
self.defaulttimeposition = None
# grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
def _getGrid(self):
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}RectifiedGrid') # noqa
if gridelem is not None:
grid = RectifiedGrid(gridelem)
else:
gridelem = self.descCov.find(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}Grid') # noqa
grid = Grid(gridelem)
return grid
grid = property(_getGrid, None)
# timelimits are the start/end times, timepositions are all timepoints.
# WCS servers can declare one or both or neither of these.
def _getTimeLimits(self):
timepoints, timelimits = [], []
b = self._elem.find(ns('lonLatEnvelope'))
if b is not None:
timepoints = b.findall('{http://www.opengis.net/gml}timePosition')
else:
# have to make a describeCoverage request...
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('temporalDomain/') + '{http://www.opengis.net/gml}timePosition'): # noqa
timepoints.append(pos)
if timepoints:
timelimits = [timepoints[0].text, timepoints[1].text]
return timelimits
timelimits = property(_getTimeLimits, None)
def _getTimePositions(self):
timepositions = []
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('temporalDomain/') + '{http://www.opengis.net/gml}timePosition'): # noqa
timepositions.append(pos.text)
return timepositions
timepositions = property(_getTimePositions, None)
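
    # Hedged example (same hypothetical "cvg"): timelimits returns only the first
    # and last declared positions, timepositions the full list; both may be empty
    # when the server declares no temporal domain.
    #
    #     if cvg.timepositions:
    #         start, end = cvg.timelimits
    #         print(len(cvg.timepositions), start, end)
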
def _getOtherBoundingBoxes(self):
bboxes = []
if not hasattr(self, 'descCov'):
self.descCov = self._service.getDescribeCoverage(self.id)
for envelope in self.descCov.findall(
ns('CoverageOffering/') + ns('domainSet/') + ns('spatialDomain/') + '{http://www.opengis.net/gml}Envelope'): # noqa
bbox = {}
bbox['nativeSrs'] = envelope.attrib['srsName']
gmlpositions = envelope.findall('{http://www.opengis.net/gml}pos')
lc = gmlpositions[0].text.split()
uc = gmlpositions[1].text.split()
bbox['bbox'] = (
float(lc[0]), float(lc[1]),
float(uc[0]), float(uc[1])
)
bboxes.append(bbox)
return bboxes
boundingboxes = property(_getOtherBoundingBoxes, None)
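
    # Hedged example (same hypothetical "cvg"): each entry is a dict holding the
    # native CRS name and a (minx, miny, maxx, maxy) tuple expressed in that CRS.
    #
    #     for box in cvg.boundingboxes:
    #         print(box['nativeSrs'], box['bbox'])
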
def _getSupportedCRSProperty(self):
# gets supported crs info
crss = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('responseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('requestResponseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedCRSs/') + ns('nativeCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
return crss
supportedCRS = property(_getSupportedCRSProperty, None)
def _getSupportedFormatsProperty(self):
# gets supported formats info
frmts = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('supportedFormats/') + ns('formats')):
frmts.append(elem.text)
return frmts
supportedFormats = property(_getSupportedFormatsProperty, None)
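
    # Hedged example (same hypothetical "cvg" and "wcs"): the supported CRS and
    # format lists are typically used to fill in a GetCoverage request. The keyword
    # names follow the WCS 1.0.0 client in this package; the CRS code, size and
    # format index are assumptions.
    #
    #     fmt = cvg.supportedFormats[0]
    #     resp = wcs.getCoverage(identifier=cvg.id, bbox=cvg.boundingBoxWGS84,
    #                            format=fmt, crs='EPSG:4326', width=256, height=256)
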
def _getAxisDescriptionsProperty(self):
        # gets any axis descriptions contained in the rangeset (requires a DescribeCoverage call to the server).
axisDescs = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns('CoverageOffering/') + ns('rangeSet/') + ns('RangeSet/') + ns('axisDescription/') + ns('AxisDescription')): # noqa
            axisDescs.append(AxisDescription(elem))  # create an AxisDescription object.
return axisDescs
axisDescriptions = property(_getAxisDescriptionsProperty, None)
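
    # Hedged example (same hypothetical "cvg"): each AxisDescription carries the
    # axis name, label and the list of allowed values from the DescribeCoverage
    # rangeSet.
    #
    #     for axis in cvg.axisDescriptions:
    #         print(axis.name, axis.label, axis.values)
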

# Classes representing gml:Grid and gml:RectifiedGrid. One of these is used for the cvg.grid property
# (where cvg is a member of the contents dictionary).
# There is no simple way to convert the offset values of a gml:RectifiedGrid into real-world coordinates
# without understanding the CRS, so that is beyond the current scope of owslib; the representation here
# simply provides access to the information in the GML.
class Grid(object):
def __init__(self, grid):
self.axislabels = []
self.dimension = None
self.lowlimits = []
self.highlimits = []
if grid is not None:
self.dimension = int(grid.get('dimension'))
self.lowlimits = grid.find(
'{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}low').text.split(' ') # noqa
self.highlimits = grid.find(
'{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}high').text.split(' ') # noqa
for axis in grid.findall('{http://www.opengis.net/gml}axisName'):
self.axislabels.append(axis.text)


class RectifiedGrid(Grid):
def __init__(self, rectifiedgrid):
super(RectifiedGrid, self).__init__(rectifiedgrid)
self.origin = rectifiedgrid.find(
'{http://www.opengis.net/gml}origin/{http://www.opengis.net/gml}pos').text.split()
self.offsetvectors = []
for offset in rectifiedgrid.findall('{http://www.opengis.net/gml}offsetVector'):
self.offsetvectors.append(offset.text.split())
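
# Hedged example (an addition, not original code): for a RectifiedGrid the origin
# and offset vectors are kept as lists of strings; converting them to real-world
# coordinates is left to the caller. "rg" is a hypothetical RectifiedGrid instance
# obtained from cvg.grid.
#
#     origin = [float(v) for v in rg.origin]
#     steps = [[float(v) for v in vec] for vec in rg.offsetvectors]
#     # cell (i, j), assuming axis-aligned offset vectors:
#     #   x = origin[0] + i * steps[0][0]
#     #   y = origin[1] + j * steps[1][1]
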
class AxisDescription(object):
def __init__(self, axisdescElem):
self.name = self.label = None
self.values = []
        for elem in axisdescElem:  # direct iteration; getchildren() was removed in Python 3.9
if elem.tag == ns('name'):
self.name = elem.text
elif elem.tag == ns('label'):
self.label = elem.text
elif elem.tag == ns('values'):
                for child in elem:
self.values.append(child.text)
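

# Minimal end-to-end sketch (an assumption-laden addition, not original code):
# exercises the classes above through the public WCS 1.0.0 client. The service URL
# is a placeholder and must be replaced with a real endpoint.
if __name__ == '__main__':
    from owslib.wcs import WebCoverageService

    wcs = WebCoverageService('http://example.com/wcs', version='1.0.0')  # hypothetical URL
    for cid, cvg in wcs.contents.items():
        # each value is a ContentMetadata instance defined above
        print(cid, cvg.title, cvg.boundingBoxWGS84)
        print('  formats:', cvg.supportedFormats)
        print('  time limits:', cvg.timelimits)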