Dataset schema (⌀ = column contains nulls):

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 4 – 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–245 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24–24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_path | string | lengths 4–245 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 ⌀ | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_path | string | lengths 4–245 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24–24 |
| content | string | lengths 4 – 996k |
| avg_line_length | float64 | 1.33 – 58.2k |
| max_line_length | int64 | 2 – 323k |
| alphanum_fraction | float64 | 0 – 0.97 |
| content_no_comment | string | lengths 0 – 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
hexsha 790c007e94046e9aa923f280e03cd1d43702aa8c | size 7,613 | ext py | lang Python
repo_path run_tracking.py | repo_name MMV-Lab/cell_movie_analysis | head_hexsha 26ac844a79ee4978db26aea8fc1fc9bd6a19a2c0 | licenses ["BSD-2-Clause"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count null | max_issues_count null | max_forks_count null | all event datetimes null
import os
import numpy as np
from glob import glob
from scipy import optimize, spatial, ndimage
from tifffile import imread, imsave
from skimage.segmentation import find_boundaries
from skimage.morphology import remove_small_objects
from skimage.draw import line
from utils import random_colormap
import pdb
# define binarization function
def prepare_binary(fn):
    # generate binary segmentation result
seg = np.squeeze(imread(fn)) > bw_th
seg = remove_small_objects(seg>0, min_size=min_obj_size)
return seg
# params
max_matching_dist = 45
approx_inf = 65535
track_display_length = 20
min_obj_size = 20
bw_th = -0.5
parent_path = "/mnt/data/"
all_movies = glob(parent_path + "timelapse/*.tiff")
for M_idx, movies in enumerate(all_movies):
movie_basename = os.path.basename(movies)
well_name = movie_basename[:-5]
seg_path = f"{parent_path}timelapse_seg/{well_name}/"
# vis_path = f"{parent_path}timelapse_track/{well_name}"
# os.makedirs(vis_path, exist_ok=True)
raw_path = f"{parent_path}timelapse/{well_name}"
track_result = f"{parent_path}timelapse_track/{well_name}_result.npy"
total_time = len(glob(raw_path + "/*.tiff"))
traj = dict()
lineage = dict()
for tt in range(total_time):
seg_fn = seg_path + f"img_{tt}_segmentation.tiff"
seg = prepare_binary(seg_fn)
# get label image
seg_label, num_cells = ndimage.label(seg)
# calculate center of mass
centroid = ndimage.center_of_mass(seg, labels=seg_label, index=np.arange(1, num_cells + 1))
# generate cell information of this frame
traj.update({
tt : {"centroid": centroid, "parent": [], "child": [], "ID": []}
})
# initialize trajectory ID, parent node, track pts for the first frame
max_cell_id = len(traj[0].get("centroid"))
traj[0].update(
{"ID": np.arange(0, max_cell_id, 1)}
)
traj[0].update(
{"parent": -1 * np.ones(max_cell_id, dtype=int)}
)
centers = traj[0].get("centroid")
pts = []
for ii in range(max_cell_id):
pts.append([centers[ii]])
lineage.update({ii: [centers[ii]]})
traj[0].update({"track_pts": pts})
for tt in np.arange(1, total_time):
p_prev = traj[tt-1].get("centroid")
p_next = traj[tt].get("centroid")
###########################################################
# simple LAP tracking
###########################################################
num_cell_prev = len(p_prev)
num_cell_next = len(p_next)
# calculate distance between each pair of cells
cost_mat = spatial.distance.cdist(p_prev, p_next)
# if the distance is too far, change to approx. Inf.
cost_mat[cost_mat > max_matching_dist] = approx_inf
        # add edges from cells in the previous frame to auxiliary vertices,
        # in order to accommodate segmentation errors and cells that leave the field of view
cost_mat_aug = max_matching_dist * 1.2 * np.ones(
(num_cell_prev, num_cell_next + num_cell_prev), dtype=float
)
cost_mat_aug[:num_cell_prev, :num_cell_next] = cost_mat[:, :]
# solve the optimization problem
row_ind, col_ind = optimize.linear_sum_assignment(cost_mat_aug)
#########################################################
# parse the matching result
#########################################################
prev_child = np.ones(num_cell_prev, dtype=int)
next_parent = np.ones(num_cell_next, dtype=int)
next_ID = np.zeros(num_cell_next, dtype=int)
next_track_pts = []
# assign child for cells in previous frame
for ii in range(num_cell_prev):
if col_ind[ii] >= num_cell_next:
prev_child[ii] = -1
else:
prev_child[ii] = col_ind[ii]
# assign parent for cells in next frame, update ID and track pts
prev_pt = traj[tt-1].get("track_pts")
prev_id = traj[tt-1].get("ID")
for ii in range(num_cell_next):
if ii in col_ind:
# a matched cell is found
next_parent[ii] = np.where(col_ind == ii)[0][0]
next_ID[ii] = prev_id[next_parent[ii]]
current_pts = prev_pt[next_parent[ii]].copy()
current_pts.append(p_next[ii])
                if len(current_pts) > track_display_length:
current_pts.pop(0)
next_track_pts.append(current_pts)
# attach this point to the lineage
single_lineage = lineage.get(next_ID[ii])
try:
single_lineage.append(p_next[ii])
except Exception:
pdb.set_trace()
lineage.update({next_ID[ii]: single_lineage})
else:
# a new cell
next_parent[ii] = -1
next_ID[ii] = max_cell_id
next_track_pts.append([p_next[ii]])
lineage.update({max_cell_id: [p_next[ii]]})
max_cell_id += 1
# update record
traj[tt-1].update({"child": prev_child})
traj[tt].update({"parent": next_parent})
traj[tt].update({"ID": next_ID})
traj[tt].update({"track_pts": next_track_pts})
np.save(track_result, [traj, lineage])
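    # Reload sketch (our addition, not in the original script): np.save stored
    # a [dict, dict] pair as an object array, so allow_pickle is required.
    traj_loaded, lineage_loaded = np.load(track_result, allow_pickle=True)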
"""
######################################################
# generate track visualization
######################################################
cmap = random_colormap()
for tt in range(total_time):
# print(traj[tt].get("ID"))
# load segmentation and extract contours
seg_fn = seg_path + f"img_{tt}_segmentation.tiff"
seg = prepare_binary(seg_fn)
seg_label, num_cells = ndimage.label(seg)
cell_contours = find_boundaries(seg, mode='inner').astype(np.uint16)
cell_contours[cell_contours > 0] = 1
cell_contours = cell_contours * seg_label.astype(np.uint16)
        cell_contours = cell_contours - 1  # shift so the first object has label 0, matching the index
        # load raw image and create visualization in RGB
        # TODO: use real raw images
        # raw = seg.astype(np.uint8)
        raw = np.squeeze(imread(raw_path + f"/img_{tt}.tiff")).astype(np.float32)
raw = (raw - raw.min())/ (raw.max() - raw.min())
raw = raw * 255
raw = raw.astype(np.uint8)
vis = np.zeros((raw.shape[0], raw.shape[1], 3), dtype=np.uint8)
for cc in range(3):
vis[:, :, cc] = raw
# loop through all cells, for each cell, we do the following
# 1- find ID, 2- load the color, 3- draw contour 4- draw track
cell_id = traj[tt].get("ID")
pts = traj[tt].get("track_pts")
for cid in range(num_cells):
# find ID
this_id = cell_id[cid]
# load the color
this_color = 255 * cmap.colors[this_id]
this_color = this_color.astype(np.uint8)
# draw contour
for cc in range(3):
vis_c = vis[:, :, cc]
vis_c[cell_contours == cid] = this_color[cc]
vis[:, :, cc] = vis_c # TODO: check if we need this line
# draw track
this_track = pts[cid]
if len(this_track) < 2:
continue
else:
for pid in range(len(this_track) - 1):
p1 = this_track[pid]
p2 = this_track[pid + 1]
rr, cc = line(int(round(p1[0])), int(round(p1[1])), int(round(p2[0])), int(round(p2[1])))
for ch in range(3):
                        vis[rr, cc, ch] = this_color[ch]
imsave(vis_path + f"img_{tt+1}.tiff", vis)
"""
avg_line_length 35.741784 | max_line_length 105 | alphanum_fraction 0.574544
content_no_comment: same script with comments stripped (near-duplicate of the content column above, omitted)
is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 790c0141eea3aed5871dd3f16d2098900fa6f222 | size 1,556 | ext py | lang Python
repo_path taller_estructuras_de_control/ejercicio12.py | repo_name JuanMPerezM/AlgoritmosyProgramacion_Talleres | head_hexsha 849e9ff97a34ee3db32d52467d0de5e981d3b0ee | licenses ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count null | max_issues_count null | max_forks_count null | all event datetimes null
"""
Entradas
Monto de dinero -> int -> a
"""
a = int(input("Ingrese monto de dinero en COP:"))
b = a
billetes_de_100000 = b // 100000
b = b % 100000
billetes_de_50000 = b // 50000
b = b % 50000
billetes_de_20000 = b // 20000
b = b % 20000
billetes_de_10000 = b // 10000
b = b % 10000
billetes_de_5000 = b // 5000
b = b % 5000
billetes_de_2000 = b // 2000
b = b % 2000
billetes_de_1000 = b // 1000
b = b % 1000
monedas_de_500 = b // 500
b = b % 500
monedas_de_200 = b // 200
b = b % 200
monedas_de_100 = b // 100
b = b % 100
monedas_de_50 = b // 50
b = b % 50
print ( "La Cantidad de billetes de 100000 es de:" + str ( billetes_de_100000 ))
print ( "La Cantidad de billetes de 50000 es de:" + str ( billetes_de_50000 ))
print ( "La Cantidad de billetes de 20000 es de:" + str ( billetes_de_20000 ))
print ( "La Cantidad de billetes de 10000 es de:" + str ( billetes_de_10000 ))
print ( "La Cantidad de billetes de 5000 es de:" + str ( billetes_de_5000 ))
print ( "La Cantidad de billetes de 2000 es de:" + str ( billetes_de_2000 ))
print ( "La Cantidad de billetes de 1000 es de:" + str ( billetes_de_1000 ))
print ( "La Cantidad de monedas de 500 es de:" + str ( monedas_de_500 ))
print ( "La Cantidad de monedas de 200 es de:" + str ( monedas_de_200 ))
print ( "La Cantidad de monedas de 100 es de:" + str ( monedas_de_100 ))
print ( "La Cantidad de monedas de 50 es de:" + str ( monedas_de_50 ))
avg_line_length 39.897436 | max_line_length 80 | alphanum_fraction 0.63946
content_no_comment: same script with comments stripped (near-duplicate of the content column above, omitted)
is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 790c01f5b7dba5ebd1fd66d047862f37cc8aa7bc | size 23,641 | ext py | lang Python
repo_path xadmin/views/dashboard.py | repo_name edwardvon/xadmin-django3 | head_hexsha f9f3ed7c6b33896e48ddf59149e1f5bd2cfe780a | licenses ["BSD-3-Clause"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count 7 (2020-02-25T08:46:07.000Z to 2021-07-22T15:25:13.000Z) | max_issues_count 6 (2020-02-25T08:36:17.000Z to 2021-09-22T18:36:37.000Z) | max_forks_count 4 (2020-02-24T06:58:59.000Z to 2021-03-02T06:53:46.000Z)
from django import forms
from django.apps import apps
from django.core.exceptions import PermissionDenied
from django.urls import reverse, NoReverseMatch
from django.template.context_processors import csrf
from django.db.models.base import ModelBase
from django.forms.forms import DeclarativeFieldsMetaclass
from django.forms.utils import flatatt
from django.template import loader
from django.http import Http404
from django.test.client import RequestFactory
from django.utils.encoding import force_text, smart_text
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.http import urlencode, urlquote
from django.views.decorators.cache import never_cache
from xadmin import widgets as exwidgets
from xadmin.layout import FormHelper
from xadmin.models import UserSettings, UserWidget
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views.base import CommAdminView, ModelAdminView, filter_hook, csrf_protect_m
from xadmin.views.edit import CreateAdminView
from xadmin.views.list import ListAdminView
from xadmin.util import unquote, DJANGO_11
import copy
class WidgetTypeSelect(forms.Widget):
def __init__(self, widgets, attrs=None):
super(WidgetTypeSelect, self).__init__(attrs)
self._widgets = widgets
def render(self, name, value, attrs=None, renderer=None):
if value is None:
value = ''
if DJANGO_11:
final_attrs = self.build_attrs(attrs, extra_attrs={'name': name})
else:
final_attrs = self.build_attrs(attrs, name=name)
final_attrs['class'] = 'nav nav-pills nav-stacked'
output = [u'<ul%s>' % flatatt(final_attrs)]
options = self.render_options(force_text(value), final_attrs['id'])
if options:
output.append(options)
output.append(u'</ul>')
output.append('<input type="hidden" id="%s_input" name="%s" value="%s"/>' %
(final_attrs['id'], name, force_text(value)))
return mark_safe(u'\n'.join(output))
def render_option(self, selected_choice, widget, id):
if widget.widget_type == selected_choice:
selected_html = u' class="active"'
else:
selected_html = ''
return (u'<li%s><a onclick="' +
'javascript:$(this).parent().parent().find(\'>li\').removeClass(\'active\');$(this).parent().addClass(\'active\');' +
'$(\'#%s_input\').attr(\'value\', \'%s\')' % (id, widget.widget_type) +
'"><h4><i class="%s"></i> %s</h4><p>%s</p></a></li>') % (
selected_html,
widget.widget_icon,
widget.widget_title or widget.widget_type,
widget.description)
def render_options(self, selected_choice, id):
output = []
for widget in self._widgets:
output.append(self.render_option(selected_choice, widget, id))
return u'\n'.join(output)
class UserWidgetAdmin(object):
model_icon = 'fa fa-dashboard'
list_display = ('widget_type', 'page_id', 'user')
list_filter = ['user', 'widget_type', 'page_id']
list_display_links = ('widget_type',)
user_fields = ['user']
hidden_menu = True
wizard_form_list = (
(_(u"Widget Type"), ('page_id', 'widget_type')),
(_(u"Widget Params"), {'callback':
"get_widget_params_form", 'convert': "convert_widget_params"})
)
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'widget_type':
widgets = widget_manager.get_widgets(self.request.GET.get('page_id', ''))
form_widget = WidgetTypeSelect(widgets)
return forms.ChoiceField(choices=[(w.widget_type, w.description) for w in widgets],
widget=form_widget, label=_('Widget Type'))
if 'page_id' in self.request.GET and db_field.name == 'page_id':
kwargs['widget'] = forms.HiddenInput
field = super(
UserWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
return field
def get_widget_params_form(self, wizard):
data = wizard.get_cleaned_data_for_step(wizard.steps.first)
widget_type = data['widget_type']
widget = widget_manager.get(widget_type)
fields = copy.deepcopy(widget.base_fields)
if 'id' in fields:
del fields['id']
return DeclarativeFieldsMetaclass("WidgetParamsForm", (forms.Form,), fields)
def convert_widget_params(self, wizard, cleaned_data, form):
widget = UserWidget()
value = dict([(f.name, f.value()) for f in form])
widget.set_value(value)
cleaned_data['value'] = widget.value
cleaned_data['user'] = self.user
def get_list_display(self):
list_display = super(UserWidgetAdmin, self).get_list_display()
if not self.user.is_superuser:
list_display.remove('user')
return list_display
def queryset(self):
if self.user.is_superuser:
return super(UserWidgetAdmin, self).queryset()
return UserWidget.objects.filter(user=self.user)
def update_dashboard(self, obj):
try:
portal_pos = UserSettings.objects.get(
user=obj.user, key="dashboard:%s:pos" % obj.page_id)
except UserSettings.DoesNotExist:
return
pos = [[w for w in col.split(',') if w != str(
obj.id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
def delete_model(self):
self.update_dashboard(self.obj)
super(UserWidgetAdmin, self).delete_model()
def delete_models(self, queryset):
for obj in queryset:
self.update_dashboard(obj)
super(UserWidgetAdmin, self).delete_models(queryset)
site.register(UserWidget, UserWidgetAdmin)
class WidgetManager(object):
_widgets = None
def __init__(self):
self._widgets = {}
def register(self, widget_class):
self._widgets[widget_class.widget_type] = widget_class
return widget_class
def get(self, name):
return self._widgets[name]
def get_widgets(self, page_id):
return self._widgets.values()
widget_manager = WidgetManager()
class WidgetDataError(Exception):
def __init__(self, widget, errors):
super(WidgetDataError, self).__init__(str(errors))
self.widget = widget
self.errors = errors
class BaseWidget(forms.Form):
template = 'xadmin/widgets/base.html'
description = 'Base Widget, don\'t use it.'
widget_title = None
widget_icon = 'fa fa-plus-square'
widget_type = 'base'
base_title = None
id = forms.IntegerField(label=_('Widget ID'), widget=forms.HiddenInput)
title = forms.CharField(label=_('Widget Title'), required=False, widget=exwidgets.AdminTextInputWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
self.admin_site = dashboard.admin_site
self.request = dashboard.request
self.user = dashboard.request.user
self.convert(data)
super(BaseWidget, self).__init__(data)
if not self.is_valid():
raise WidgetDataError(self, self.errors.as_text())
self.setup()
def setup(self):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
self.helper = helper
self.id = self.cleaned_data['id']
self.title = self.cleaned_data['title'] or self.base_title
if not (self.user.is_superuser or self.has_perm()):
raise PermissionDenied
@property
def widget(self):
context = {'widget_id': self.id, 'widget_title': self.title, 'widget_icon': self.widget_icon,
'widget_type': self.widget_type, 'form': self, 'widget': self}
context.update(csrf(self.request))
self.context(context)
return loader.render_to_string(self.template, context)
def context(self, context):
pass
def convert(self, data):
pass
def has_perm(self):
return False
def save(self):
value = dict([(f.name, f.value()) for f in self])
user_widget = UserWidget.objects.get(id=self.id)
user_widget.set_value(value)
user_widget.save()
def static(self, path):
return self.dashboard.static(path)
def vendor(self, *tags):
return self.dashboard.vendor(*tags)
def media(self):
return forms.Media()
@widget_manager.register
class HtmlWidget(BaseWidget):
widget_type = 'html'
widget_icon = 'fa fa-file-o'
description = _(
u'Html Content Widget, can write any html content in widget.')
content = forms.CharField(label=_(
'Html Content'), widget=exwidgets.AdminTextareaWidget, required=False)
def has_perm(self):
return True
def context(self, context):
context['content'] = self.cleaned_data['content']
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
def __iter__(self):
from xadmin import site as g_admin_site
for m, ma in g_admin_site._registry.items():
yield ('%s.%s' % (m._meta.app_label, m._meta.model_name),
m._meta.verbose_name)
class ModelChoiceField(forms.ChoiceField):
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, *args, **kwargs):
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
forms.Field.__init__(self, required=required, widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
self.widget.choices = self.choices
def __deepcopy__(self, memo):
result = forms.Field.__deepcopy__(self, memo)
return result
def _get_choices(self):
return ModelChoiceIterator(self)
choices = property(_get_choices, forms.ChoiceField._set_choices)
def to_python(self, value):
if isinstance(value, ModelBase):
return value
app_label, model_name = value.lower().split('.')
return apps.get_model(app_label, model_name)
def prepare_value(self, value):
if isinstance(value, ModelBase):
value = '%s.%s' % (value._meta.app_label, value._meta.model_name)
return value
def valid_value(self, value):
value = self.prepare_value(value)
for k, v in self.choices:
if value == smart_text(k):
return True
return False
class ModelBaseWidget(BaseWidget):
app_label = None
model_name = None
model_perm = 'change'
model = ModelChoiceField(label=_(u'Target Model'), widget=exwidgets.AdminSelectWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
super(ModelBaseWidget, self).__init__(dashboard, data)
def setup(self):
self.model = self.cleaned_data['model']
self.app_label = self.model._meta.app_label
self.model_name = self.model._meta.model_name
super(ModelBaseWidget, self).setup()
def has_perm(self):
return self.dashboard.has_model_perm(self.model, self.model_perm)
def filte_choices_model(self, model, modeladmin):
return self.dashboard.has_model_perm(model, self.model_perm)
def model_admin_url(self, name, *args, **kwargs):
return reverse(
"%s:%s_%s_%s" % (self.admin_site.app_name, self.app_label,
self.model_name, name), args=args, kwargs=kwargs)
class PartialBaseWidget(BaseWidget):
def get_view_class(self, view_class, model=None, **opts):
admin_class = self.admin_site._registry.get(model) if model else None
return self.admin_site.get_view_class(view_class, admin_class, **opts)
def get_factory(self):
return RequestFactory()
def setup_request(self, request):
request.user = self.user
request.session = self.request.session
return request
def make_get_request(self, path, data={}, **extra):
req = self.get_factory().get(path, data, **extra)
return self.setup_request(req)
def make_post_request(self, path, data={}, **extra):
req = self.get_factory().post(path, data, **extra)
return self.setup_request(req)
@widget_manager.register
class QuickBtnWidget(BaseWidget):
widget_type = 'qbutton'
description = _(u'Quick button Widget, quickly open any page.')
template = "xadmin/widgets/qbutton.html"
base_title = _(u"Quick Buttons")
widget_icon = 'fa fa-caret-square-o-right'
def convert(self, data):
self.q_btns = data.pop('btns', [])
def get_model(self, model_or_label):
if isinstance(model_or_label, ModelBase):
return model_or_label
else:
return apps.get_model(*model_or_label.lower().split('.'))
def context(self, context):
btns = []
for b in self.q_btns:
btn = {}
if 'model' in b:
model = self.get_model(b['model'])
if not self.user.has_perm("%s.view_%s" % (model._meta.app_label, model._meta.model_name)):
continue
btn['url'] = reverse("%s:%s_%s_%s" % (self.admin_site.app_name, model._meta.app_label,
model._meta.model_name, b.get('view', 'changelist')))
btn['title'] = model._meta.verbose_name
btn['icon'] = self.dashboard.get_model_icon(model)
else:
try:
btn['url'] = reverse(b['url'])
except NoReverseMatch:
btn['url'] = b['url']
if 'title' in b:
btn['title'] = b['title']
if 'icon' in b:
btn['icon'] = b['icon']
btns.append(btn)
context.update({'btns': btns})
def has_perm(self):
return True
@widget_manager.register
class ListWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'list'
description = _(u'Any Objects list Widget.')
template = "xadmin/widgets/list.html"
model_perm = 'view'
widget_icon = 'fa fa-align-justify'
def convert(self, data):
self.list_params = data.pop('params', {})
self.list_count = data.pop('count', 10)
def setup(self):
super(ListWidget, self).setup()
if not self.title:
self.title = self.model._meta.verbose_name_plural
req = self.make_get_request("", self.list_params)
self.list_view = self.get_view_class(ListAdminView, self.model)(req)
if self.list_count:
self.list_view.list_per_page = self.list_count
def context(self, context):
list_view = self.list_view
list_view.make_result_list()
base_fields = list_view.base_list_display
if len(base_fields) > 5:
base_fields = base_fields[0:5]
context['result_headers'] = [c for c in list_view.result_headers(
).cells if c.field_name in base_fields]
context['results'] = [[o for i, o in
enumerate(filter(lambda c:c.field_name in base_fields, r.cells))]
for r in list_view.results()]
context['result_count'] = list_view.result_count
context['page_url'] = self.model_admin_url('changelist') + "?" + urlencode(self.list_params)
@widget_manager.register
class AddFormWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'addform'
description = _(u'Add any model object Widget.')
template = "xadmin/widgets/addform.html"
model_perm = 'add'
widget_icon = 'fa fa-plus'
def setup(self):
super(AddFormWidget, self).setup()
if self.title is None:
self.title = _('Add %s') % self.model._meta.verbose_name
req = self.make_get_request("")
self.add_view = self.get_view_class(
CreateAdminView, self.model, list_per_page=10)(req)
self.add_view.instance_forms()
def context(self, context):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
context.update({
'addform': self.add_view.form_obj,
'addhelper': helper,
'addurl': self.add_view.model_admin_url('add'),
'model': self.model
})
def media(self):
return self.add_view.media + self.add_view.form_obj.media + self.vendor('xadmin.plugin.quick-form.js')
class Dashboard(CommAdminView):
widget_customiz = True
widgets = []
title = _(u"Dashboard")
icon = None
def get_page_id(self):
return self.request.path
def get_portal_key(self):
return "dashboard:%s:pos" % self.get_page_id()
@filter_hook
def get_widget(self, widget_or_id, data=None):
try:
if isinstance(widget_or_id, UserWidget):
widget = widget_or_id
else:
widget = UserWidget.objects.get(user=self.user, page_id=self.get_page_id(), id=widget_or_id)
wid = widget_manager.get(widget.widget_type)
class widget_with_perm(wid):
def context(self, context):
super(widget_with_perm, self).context(context)
context.update({'has_change_permission': self.request.user.has_perm('xadmin.change_userwidget')})
wid_instance = widget_with_perm(self, data or widget.get_value())
return wid_instance
except UserWidget.DoesNotExist:
return None
@filter_hook
def get_init_widget(self):
portal = []
widgets = self.widgets
for col in widgets:
portal_col = []
for opts in col:
try:
widget = UserWidget(user=self.user, page_id=self.get_page_id(), widget_type=opts['type'])
widget.set_value(opts)
widget.save()
portal_col.append(self.get_widget(widget))
except (PermissionDenied, WidgetDataError):
widget.delete()
continue
portal.append(portal_col)
UserSettings(
user=self.user, key="dashboard:%s:pos" % self.get_page_id(),
value='|'.join([','.join([str(w.id) for w in col]) for col in portal])).save()
return portal
@filter_hook
def get_widgets(self):
if self.widget_customiz:
portal_pos = UserSettings.objects.filter(
user=self.user, key=self.get_portal_key())
if len(portal_pos):
portal_pos = portal_pos[0].value
widgets = []
if portal_pos:
user_widgets = dict([(uw.id, uw) for uw in UserWidget.objects.filter(user=self.user, page_id=self.get_page_id())])
for col in portal_pos.split('|'):
ws = []
for wid in col.split(','):
try:
widget = user_widgets.get(int(wid))
if widget:
ws.append(self.get_widget(widget))
except Exception as e:
import logging
logging.error(e, exc_info=True)
widgets.append(ws)
return widgets
return self.get_init_widget()
@filter_hook
def get_title(self):
return self.title
@filter_hook
def get_context(self):
new_context = {
'title': self.get_title(),
'icon': self.icon,
'portal_key': self.get_portal_key(),
'columns': [('col-sm-%d' % int(12 / len(self.widgets)), ws) for ws in self.widgets],
'has_add_widget_permission': self.has_model_perm(UserWidget, 'add') and self.widget_customiz,
'add_widget_url': self.get_admin_url('%s_%s_add' % (UserWidget._meta.app_label, UserWidget._meta.model_name)) +
"?user=%s&page_id=%s&_redirect=%s" % (self.user.id, self.get_page_id(), urlquote(self.request.get_full_path()))
}
context = super(Dashboard, self).get_context()
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response('xadmin/views/dashboard.html', self.get_context())
@csrf_protect_m
def post(self, request, *args, **kwargs):
if 'id' in request.POST:
widget_id = request.POST['id']
if request.POST.get('_delete', None) != 'on':
widget = self.get_widget(widget_id, request.POST.copy())
widget.save()
else:
try:
widget = UserWidget.objects.get(
user=self.user, page_id=self.get_page_id(), id=widget_id)
widget.delete()
try:
portal_pos = UserSettings.objects.get(user=self.user, key="dashboard:%s:pos" % self.get_page_id())
pos = [[w for w in col.split(',') if w != str(
widget_id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
except Exception:
pass
except UserWidget.DoesNotExist:
pass
return self.get(request)
@filter_hook
def get_media(self):
media = super(Dashboard, self).get_media() + \
self.vendor('xadmin.page.dashboard.js', 'xadmin.page.dashboard.css')
if self.widget_customiz:
media = media + self.vendor('xadmin.plugin.portal.js')
for ws in self.widgets:
for widget in ws:
media = media + widget.media()
return media
class ModelDashboard(Dashboard, ModelAdminView):
title = _(u"%s Dashboard")
def get_page_id(self):
return 'model:%s/%s' % self.model_info
@filter_hook
def get_title(self):
return self.title % force_text(self.obj)
def init_request(self, object_id, *args, **kwargs):
self.obj = self.get_object(unquote(object_id))
if not self.has_view_permission(self.obj):
raise PermissionDenied
if self.obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(self.opts.verbose_name), 'key': escape(object_id)})
@filter_hook
def get_context(self):
new_context = {
'has_change_permission': self.has_change_permission(self.obj),
'object': self.obj,
}
context = Dashboard.get_context(self)
context.update(ModelAdminView.get_context(self))
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
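# Sketch (our addition, not part of xadmin): a minimal custom widget hooked in
# via widget_manager, following the HtmlWidget pattern above; the class name,
# widget_type, and icon are invented for illustration.
@widget_manager.register
class NoteWidget(BaseWidget):
    widget_type = 'note'
    widget_icon = 'fa fa-sticky-note'
    description = _(u'Fixed note Widget, shows a static note on the dashboard.')
    content = forms.CharField(label=_('Note'), widget=exwidgets.AdminTextareaWidget, required=False)

    def has_perm(self):
        # any user who can see the dashboard may add a note
        return True

    def context(self, context):
        # the stock base template renders the 'content' key, as HtmlWidget does
        context['content'] = self.cleaned_data['content']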
avg_line_length 35.285075 | max_line_length 134 | alphanum_fraction 0.609069
content_no_comment: same module with comments stripped (near-duplicate of the content column above, omitted)
is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 790c02034a0ad703f5ce9fa5ce3bd8f177051c9a | size 44,900 | ext py | lang Python
repo_path platipy/dicom/io/crawl.py | repo_name RadiotherapyAI/platipy | head_hexsha 53294789a3805ea088c9953027f4ab09a614f052 | licenses ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count 26 (2020-10-26T17:30:00.000Z to 2022-03-07T01:21:37.000Z) | max_issues_count 20 (2020-10-01T04:05:37.000Z to 2022-03-29T23:27:11.000Z) | max_forks_count 5 (2020-10-01T03:33:36.000Z to 2021-02-20T09:58:30.000Z)
# Copyright 2020 University of New South Wales, University of Sydney
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pathlib
import pydicom
import numpy as np
import SimpleITK as sitk
from skimage.draw import polygon
from loguru import logger
from datetime import datetime
def flatten(itr):
if type(itr) in (str, bytes, sitk.Image):
yield itr
else:
for x in itr:
try:
yield from flatten(x)
except TypeError:
yield x
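# Example (our addition, not in the original module):
#   list(flatten(["a.dcm", ["b.dcm", ("c.dcm",)]])) -> ["a.dcm", "b.dcm", "c.dcm"]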
def get_suv_bw_scale_factor(ds):
# Modified from
# https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf
if ds.Units == "CNTS":
# Try to find the Philips private scale factor")
return float(ds[0x7053, 0x1000].value)
assert ds.Modality == "PT"
assert "DECY" in ds.CorrectedImage
assert "ATTN" in ds.CorrectedImage
assert "START" in ds.DecayCorrection
assert ds.Units == "BQML"
half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)
if "SeriesTime" in ds:
series_date_time = ds.SeriesDate + "_" + ds.SeriesTime
if "." in series_date_time:
series_date_time = series_date_time[
: -(len(series_date_time) - series_date_time.index("."))
]
series_date_time = datetime.strptime(series_date_time, "%Y%m%d_%H%M%S")
if "SeriesTime" in ds:
start_time = (
ds.SeriesDate
+ "_"
+ ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime
)
if "." in start_time:
start_time = start_time[: -(len(start_time) - start_time.index("."))]
start_time = datetime.strptime(start_time, "%Y%m%d_%H%M%S")
decay_time = (series_date_time - start_time).seconds
injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
decayed_dose = injected_dose * pow(2, -decay_time / half_life)
patient_weight = float(ds.PatientWeight)
suv_bw_scale_factor = patient_weight * 1000 / decayed_dose
return suv_bw_scale_factor
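# Usage sketch (our addition, not part of this module); the file path is
# hypothetical and assumes a decay-corrected BQML PET slice:
#   ds = pydicom.dcmread("pet_slice.dcm")
#   suv = ds.pixel_array * float(ds.RescaleSlope) * get_suv_bw_scale_factor(ds)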
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name="UNKNOWN"):
"""
Attempts to return some information from a DICOM
This is typically used for naming converted NIFTI files
    Args:
        dicom_object (pydicom.dataset.FileDataset): The DICOM object
        return_extra (bool, optional): return information that is usually not required
        sop_class_name (str, optional): fallback SOP class name used when the DICOM
            object does not define one. Defaults to "UNKNOWN".
    Returns:
        info (str): Some extracted information
    """
try:
dicom_sop_class_name = dicom_object.SOPClassUID.name
except AttributeError:
logger.warning(f"Could not find DICOM SOP Class UID, using {sop_class_name}.")
dicom_sop_class_name = sop_class_name
if "Image" in dicom_sop_class_name:
# Get the modality
image_modality = dicom_object.Modality
logger.info(f" Image modality: {image_modality}")
if image_modality == "CT":
# There is typically not much extra information
# At the moment, we do not return anything for CT imaging
if return_extra:
try:
protocol_name = dicom_object.ProtocolName
if protocol_name != "":
return re.sub(r"[^\w]", "_", protocol_name).upper()
except AttributeError:
logger.warning(" Could not find ProtocolName")
return ""
elif image_modality == "MR":
# Not much consistency, but we can get the protocol name
try:
protocol_name = re.sub(r"[^\w]", "_", dicom_object.ProtocolName).upper()
except AttributeError:
logger.warning(" Could not find ProtocolName")
protocol_name = ""
try:
sequence_name = re.sub(r"[^\w]", "_", dicom_object.SequenceName).upper()
except AttributeError:
logger.warning(" Could not find SequenceName")
sequence_name = ""
try:
series_description = re.sub(r"[^\w]", "_", dicom_object.SeriesDescription).upper()
except AttributeError:
            logger.warning("  Could not find SeriesDescription")
series_description = ""
combined_name = "_".join([protocol_name, sequence_name, series_description])
while "__" in combined_name:
combined_name = combined_name.replace("__", "_")
if protocol_name != "" and not return_extra:
return protocol_name
else:
return combined_name
elif image_modality == "PT":
# Not much experience with this
# We can search through the corrections applied
        # Return whether or not attenuation correction is applied
try:
corrections = dicom_object.CorrectedImage
except AttributeError:
corrections = "NONE"
if "ATTN" in corrections:
return "AC"
else:
return "NAC"
def safe_sort_dicom_image_list(dicom_image_list):
"""
Sorts a list of DICOM image files based on a DICOM tag value.
This is a much safer method than reading SliceLocation.
It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
The list of DICOM files is sorted by projecting the image position onto the axis normal to the
place defined by the image orientation.
This accounts for differences in patient position (e.g. HFS/FFS).
Args:
dicom_image_list (list): [description]
"""
sorted_dict = {}
for dicom_file in dicom_image_list:
dcm = pydicom.read_file(dicom_file, force=True)
image_position = np.array(dcm.ImagePositionPatient, dtype=float)
image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)
image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])
slice_location = (image_position * image_plane_normal)[2]
sorted_dict[dicom_file] = slice_location
sorter_safe = lambda dcm_file: sorted_dict[dcm_file]
return sorted(dicom_image_list, key=sorter_safe)
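# Illustrative sketch (hypothetical directory): load a CT series in correct
# anatomical order regardless of file naming, using the sorter above.
def _example_read_sorted_series(series_directory):
    files = [p.as_posix() for p in pathlib.Path(series_directory).glob("*.dcm")]
    return sitk.ReadImage(safe_sort_dicom_image_list(files))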
def fix_missing_data(contour_data_list):
"""
Fixes missing points in contouring using simple linear interpolation
Args:
contour_data_list (list): The contour data for each slice
Returns:
contour_data (numpy array): Interpolated contour data
"""
contour_data = np.array(contour_data_list)
    if (contour_data == "").any():
logger.warning(" Missing values detected.")
missing_values = np.where(contour_data == "")[0]
if missing_values.shape[0] > 1:
logger.warning(" More than one value missing, fixing this isn't implemented yet...")
else:
logger.warning(" Only one value missing.")
missing_index = missing_values[0]
missing_axis = missing_index % 3
            # Contour data is a flat [x0, y0, z0, x1, y1, z1, ...] list, so the
            # same-axis neighbours sit 3 entries apart; the contour is closed,
            # so the first and last points wrap around.
            if missing_axis == 0:
                logger.warning("    Missing value in x axis: interpolating.")
                if missing_index >= len(contour_data) - 3:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[0]
                elif missing_index == 0:
                    lower_val = contour_data[-3]
                    upper_val = contour_data[3]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            elif missing_axis == 1:
                logger.warning("    Missing value in y axis: interpolating.")
                if missing_index >= len(contour_data) - 2:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[1]
                elif missing_index == 1:
                    lower_val = contour_data[-2]
                    upper_val = contour_data[4]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
else:
logger.warning(" Missing value in z axis: taking slice value")
temp = contour_data[2::3].tolist()
temp.remove("")
contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
return contour_data
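# Illustrative sketch, not part of the original module: a small closed contour
# stored as flat (x, y, z) triplets with one missing x value, which the routine
# above linearly interpolates from its same-axis neighbours.
def _example_fix_missing_x():
    contour = [0.0, 0.0, 5.0, "", 0.0, 5.0, 10.0, 10.0, 5.0, 0.0, 10.0, 5.0]
    fixed = fix_missing_data(contour)
    # the missing x (index 3) becomes 0.5 * (0.0 + 10.0) = 5.0
    return np.array(fixed, dtype=np.double)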
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
"""
This function is used to generate a binary mask from a set of vertices.
This allows us to convert from DICOM-RTStruct format to any imaging format.
Args:
image ([SimpleITK.Image]): The image, used to copy imaging information
(e.g. resolution, spacing)
dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
spacing_override (bool | tuple, optional): Overwrite the spacing.
Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.
Returns:
list, list : final_struct_name_sequence, structure_list
"""
if spacing_override:
current_spacing = list(image.GetSpacing())
new_spacing = tuple(
[
current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]
for k in range(3)
]
)
image.SetSpacing(new_spacing)
struct_point_sequence = dicom_struct.ROIContourSequence
struct_name_sequence = [
"_".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence
]
structure_list = []
final_struct_name_sequence = []
for structIndex, structure_name in enumerate(struct_name_sequence):
image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)
logger.info(
" Converting structure {0} with name: {1}".format(structIndex, structure_name)
)
if structIndex >= len(struct_point_sequence):
logger.warning(" Contour sequence is missing, skipping.")
continue
if not hasattr(struct_point_sequence[structIndex], "ContourSequence"):
logger.warning(" No contour sequence found for this structure, skipping.")
continue
if len(struct_point_sequence[structIndex].ContourSequence) == 0:
logger.warning(" Contour sequence is empty, skipping.")
continue
if (
not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType
== "CLOSED_PLANAR"
):
logger.warning(" This is not a closed planar structure, skipping.")
continue
for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
contour_data = fix_missing_data(
struct_point_sequence[structIndex].ContourSequence[sl].ContourData
)
struct_slice_contour_data = np.array(contour_data, dtype=np.double)
vertexArr_physical = struct_slice_contour_data.reshape(
struct_slice_contour_data.shape[0] // 3, 3
)
point_arr = np.array(
[image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]
).T
[xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
zIndex = point_arr[2][0]
if np.any(point_arr[2] != zIndex):
logger.error(" Axial slice index varies in contour. Quitting now.")
logger.error(" Structure: {0}".format(structure_name))
logger.error(" Slice index: {0}".format(zIndex))
                sys.exit()
if zIndex >= image.GetSize()[2]:
logger.warning(" Slice index greater than image size. Skipping slice.")
logger.warning(" Structure: {0}".format(structure_name))
logger.warning(" Slice index: {0}".format(zIndex))
continue
sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
filledIndicesX, filledIndicesY = polygon(
xVertexArr_image, yVertexArr_image, shape=sliceArr.shape
)
sliceArr[filledIndicesX, filledIndicesY] = 1
image_blank[zIndex] += sliceArr.T
struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
struct_image.CopyInformation(image)
structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
structure_name_clean = re.sub(r"[^\w]", "_", structure_name).upper()
while "__" in structure_name_clean:
structure_name_clean = structure_name_clean.replace("__", "_")
final_struct_name_sequence.append(structure_name_clean)
return final_struct_name_sequence, structure_list
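# Illustrative sketch (hypothetical paths): rasterise every ROI of an RT-Struct
# against its CT grid and save each binary mask as a NIFTI file.
def _example_structures_to_nifti(ct_directory, rtstruct_file, output_dir="./structures"):
    ct_files = [p.as_posix() for p in pathlib.Path(ct_directory).glob("*.dcm")]
    image = sitk.ReadImage(safe_sort_dicom_image_list(ct_files))
    rtstruct = pydicom.read_file(rtstruct_file, force=True)
    names, masks = transform_point_set_from_dicom_struct(image, rtstruct)
    out_path = pathlib.Path(output_dir)
    out_path.mkdir(parents=True, exist_ok=True)
    for name, mask in zip(names, masks):
        sitk.WriteImage(mask, (out_path / (name + ".nii.gz")).as_posix())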
def process_dicom_file_list(dicom_file_list, parent_sorting_field="PatientName", verbose=False):
"""
Organise the DICOM files by the series UID
"""
dicom_series_dict_parent = {}
for i, dicom_file in enumerate(sorted(dicom_file_list)):
if verbose is True:
logger.debug(f" Sorting file {i}")
dicom_file = dicom_file.as_posix()
if "dicomdir" in dicom_file.lower():
logger.warning(
"DICOMDIR is not supported in this tool, images are read directly. Skipping."
)
continue
dicom_object = pydicom.read_file(dicom_file, force=True)
parent_sorting_field_data = dicom_object[parent_sorting_field].value
if parent_sorting_field_data not in dicom_series_dict_parent.keys():
dicom_series_dict_parent[parent_sorting_field_data] = {}
series_uid = dicom_object.SeriesInstanceUID
if series_uid not in dicom_series_dict_parent[parent_sorting_field_data].keys():
dicom_series_dict_parent[parent_sorting_field_data][series_uid] = [dicom_file]
else:
dicom_series_dict_parent[parent_sorting_field_data][series_uid].append(dicom_file)
return dicom_series_dict_parent
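# Illustrative sketch: summarise the nested dictionary produced above for a
# list of pathlib.Path objects (the function calls .as_posix() internally).
def _example_summarise_series(dicom_file_paths):
    by_parent = process_dicom_file_list(dicom_file_paths)
    for parent, series_dict in by_parent.items():
        for series_uid, files in series_dict.items():
            logger.info(f"{parent} / {series_uid}: {len(files)} files")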
def process_dicom_series(
dicom_series_dict,
series_uid,
parent_sorting_field="PatientName",
return_extra=True,
individual_file=False,
initial_sop_class_name_default="UNKNOWN",
):
if not individual_file:
logger.info(f" Processing series UID: {series_uid}")
dicom_file_list = dicom_series_dict[series_uid]
else:
logger.info(f" Processing individual file: {individual_file}")
dicom_file_list = [individual_file]
logger.info(f" Number of DICOM files: {len(dicom_file_list)}")
initial_dicom = pydicom.read_file(dicom_file_list[0])
# Get the data in the parent sorting field, clean with RegEx
parent_sorting_data = re.sub(
r"[^\w]", "_", str(initial_dicom[parent_sorting_field].value)
).upper()
if parent_sorting_data == "":
logger.error(
f"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly."
)
"""
! TO DO
Implement a routine to let a user correlate a root directory with a name
"""
parent_sorting_data = "TEMP"
try:
initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name
except AttributeError:
logger.warning(
f"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}."
)
initial_dicom_sop_class_name = initial_sop_class_name_default
try:
study_uid = initial_dicom.StudyInstanceUID
except AttributeError:
study_uid = "00001"
"""
! TO DO
Need to check for secondary capture image storage
This can include JPEGs with written information on them
This is typically not very useful
We can dump it to file
Or just save the DICOM file in the folder of interest
Not a big problem, sort out another day
"""
# Check the potential types of DICOM files
if (
"Image" in initial_dicom_sop_class_name
and initial_dicom_sop_class_name != "Secondary Capture Image Storage"
):
        # Load as a primary image
sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)
try:
image = sitk.ReadImage(sorted_file_list)
except RuntimeError:
logger.warning(" Could not read image into SimpleITK.")
logger.info(" Processing files individually.")
            # `return <generator>` inside a generator function silently discards
            # the recursive call's yields, so delegate with `yield from` instead.
            for dicom_file in dicom_file_list:
                yield from process_dicom_series(
                    dicom_series_dict,
                    series_uid,
                    parent_sorting_field=parent_sorting_field,
                    return_extra=return_extra,
                    individual_file=dicom_file,
                    initial_sop_class_name_default=initial_sop_class_name_default,
                )
            return
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
"""
! TO DO - integrity check
Read in all the files here, check the slice location and determine if any are missing
"""
if initial_dicom.Modality == "PT":
# scaling_factor = get_suv_bw_scale_factor(initial_dicom)
# image *= scaling_factor
# !TO DO
# Work on PET SUV conversion
            pass
"""
! CHECKPOINT
Some DCE MRI sequences have the same series UID
Here we check the sequence name, and split if necessary
"""
if initial_dicom.Modality == "MR":
try:
sequence_names = np.unique(
[pydicom.read_file(x).SequenceName for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.SequenceName
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
except AttributeError:
try:
logger.warning(
" MRI sequence name not found. The SeriesDescription will be used instead."
)
sequence_names = np.unique(
[pydicom.read_file(x).SeriesDescription for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.SeriesDescription
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
except AttributeError:
logger.warning(
" MRI SeriesDescription not found. The AcquisitionComments will be used instead."
)
sequence_names = np.unique(
[pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.AcquisitionComments
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
if initial_dicom.Manufacturer == "GE MEDICAL SYSTEMS":
# GE use the DICOM tag (0019, 10a2) [Raw data run number]
# in Diffusion weighted MRI sequences
            # We need to separate this out to get the different sequences
if initial_dicom.SeriesDescription == "Diffusion Weighted":
# num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )
# number_of_images / images_per_seq
num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value
sequence_names = np.unique(
[
f"DWI_{str( ( pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq )}"
for x in dicom_file_list
]
)
sequence_name_index_dict = {
name: index for index, name in enumerate(sequence_names)
}
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = f"DWI_{str( ( dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq )}"
var_to_index = sequence_name_index_dict[var]
if var_to_index not in sequence_dict.keys():
sequence_dict[var_to_index] = [dcm_name]
else:
sequence_dict[var_to_index].append(dcm_name)
sequence_names = sorted(sequence_dict.keys())
        if len(sequence_names) > 1:
            logger.warning("    Multiple MR sequences were found under a single series UID.")
logger.warning(" These will be split into separate images.")
# Split up the DICOM file list by sequence name
for sequence_name in sequence_names:
dicom_file_list_by_sequence = sequence_dict[sequence_name]
logger.info(sequence_name)
logger.info(len(dicom_file_list_by_sequence))
sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)
initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
image_by_sequence = sitk.ReadImage(sorted_file_list)
dicom_file_metadata_by_sequence = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
yield "IMAGES", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence
return # Stop iteration
yield "IMAGES", dicom_file_metadata, initial_dicom, image
if "Structure" in initial_dicom_sop_class_name:
# Load as an RT structure set
# This should be done individually for each file
logger.info(f" Number of files: {len(dicom_file_list)}")
for index, dicom_file in enumerate(dicom_file_list):
dicom_object = pydicom.read_file(dicom_file, force=True)
# We must also read in the corresponding DICOM image
# This can be found by matching the references series UID to the series UID
"""
! TO DO
What happens if there is an RT structure set with different referenced sequences?
"""
# Get the "ReferencedFrameOfReferenceSequence", first item
referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]
# Get the "RTReferencedStudySequence", first item
# This retrieves the study UID
# This might be useful, but would typically match the actual StudyInstanceUID in the
# DICOM object
rt_referenced_series_item = (
referenced_frame_of_reference_item.RTReferencedStudySequence[0]
)
# Get the "RTReferencedSeriesSequence", first item
            # This retrieves the actual referenced series UID, which we need to match imaging
# parameters
rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[
0
]
# Get the appropriate series instance UID
image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID
logger.info(f" Item {index}: Matched SeriesInstanceUID = {image_series_uid}")
# Read in the corresponding image
sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])
image = sitk.ReadImage(sorted_file_list)
initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
(
structure_name_list,
structure_image_list,
) = transform_point_set_from_dicom_struct(image, dicom_object)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
"structure_name_list": structure_name_list,
}
yield "STRUCTURES", dicom_file_metadata, dicom_object, structure_image_list
if "Dose" in initial_dicom_sop_class_name:
# Load as an RT Dose distribution
# This should be done individually for each file
logger.info(f" Number of files: {len(dicom_file_list)}")
for index, dicom_file in enumerate(dicom_file_list):
dicom_object = pydicom.read_file(dicom_file, force=True)
"""
! CHECKPOINT
There should only be a single RT dose file (with each series UID)
If there are more, yield each
"""
initial_dicom = pydicom.read_file(dicom_file, force=True)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
            # Read the dose grid as float, otherwise scaling by DoseGridScaling below would truncate
raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)
dose_grid_scaling = dicom_object.DoseGridScaling
logger.debug(f" Dose grid scaling: {dose_grid_scaling} Gy")
scaled_dose_image = raw_dose_image * dose_grid_scaling
yield "DOSES", dicom_file_metadata, dicom_object, scaled_dose_image
"""
! TO DO
1. (DONE) Implement conversion of dose files (to NIFTI images)
2. Implement conversion of RT plan files to text dump
3. Do something with other files (e.g. Deformable Image Registration stuff)
"""
return
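# Illustrative sketch: consume the generator above for one series and report
# what it yields. `series_dict` maps series UIDs to file lists, as produced by
# process_dicom_file_list for a single parent (e.g. one patient).
def _example_inspect_series(series_dict, series_uid):
    for dicom_type, metadata, first_dcm, _ in process_dicom_series(series_dict, series_uid):
        logger.info(f"{dicom_type}: study UID {metadata['study_uid']}, modality {first_dcm.Modality}")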
def write_output_data_to_disk(
output_data_dict,
output_directory="./",
output_file_suffix=".nii.gz",
overwrite_existing_files=False,
):
"""
Write output to disk
"""
if output_data_dict is None:
return
filename_fields = [i for i in output_data_dict.keys() if i != "parent_sorting_data"]
parent_sorting_data = output_data_dict["parent_sorting_data"]
files_written = {}
"""
    Write the converted images to disk
    ! CONSIDER
    We could simply write as we go?
    Pro: save memory, important if processing very large files
    Con: collecting everything first (as done here) allows proper indexing of duplicate names
"""
for field in filename_fields:
logger.info(f" Writing files for field: {field}")
p = pathlib.Path(output_directory) / parent_sorting_data / field
p.mkdir(parents=True, exist_ok=True)
files_written[field] = []
for field_filename_base, field_list in output_data_dict[field].items():
# Check if there is a list of images with matching names
# This will depend on the name format chosen
# If there is a list, we append an index as we write to disk
if isinstance(field_list, (tuple, list)):
# Flatten
field_list_flat = list(flatten(field_list))
# Iterate
for suffix, file_to_write in enumerate(field_list_flat):
field_filename = field_filename_base + f"_{suffix}"
# Some cleaning
while "__" in field_filename:
field_filename = field_filename.replace("__", "_")
while field_filename[-1] == "_":
field_filename = field_filename[:-1]
# Save image!
output_name = (
pathlib.Path(output_directory)
/ parent_sorting_data
/ field
/ (field_filename + output_file_suffix)
)
files_written[field].append(output_name)
if output_name.is_file():
logger.warning(f" File exists: {output_name}")
if overwrite_existing_files:
logger.warning(" You have selected to overwrite existing files.")
else:
logger.info(
" You have selected to NOT overwrite existing files. Continuing."
)
continue
sitk.WriteImage(file_to_write, output_name.as_posix())
else:
field_filename = field_filename_base
file_to_write = field_list
# Some cleaning
while "__" in field_filename:
field_filename = field_filename.replace("__", "_")
while field_filename[-1] == "_":
field_filename = field_filename[:-1]
# Save image!
"""
! TO DO
                Use pathlib, and perform some checks so we don't overwrite anything!
"""
output_name = (
pathlib.Path(output_directory)
/ parent_sorting_data
/ field
/ (field_filename + output_file_suffix)
)
files_written[field].append(output_name)
if output_name.is_file():
logger.warning(f" File exists: {output_name}")
if overwrite_existing_files:
logger.warning(" You have selected to overwrite existing files.")
else:
logger.info(
" You have selected to NOT overwrite existing files. Continuing."
)
continue
sitk.WriteImage(file_to_write, output_name.as_posix())
return files_written
def process_dicom_directory(
dicom_directory,
parent_sorting_field="PatientName",
output_image_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}",
output_structure_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}",
output_dose_name_format="{parent_sorting_data}_{study_uid_index}_{DoseSummationType}",
return_extra=True,
output_directory="./",
output_file_suffix=".nii.gz",
overwrite_existing_files=False,
write_to_disk=True,
verbose=False,
initial_sop_class_name_default="UNKNOWN",
):
# Check dicom_directory type
    if isinstance(dicom_directory, (str, pathlib.Path)):
# Get all the DICOM files in the given directory
root_path = pathlib.Path(dicom_directory)
# Find files ending with .dcm, .dc3
dicom_file_list = [
p
for p in root_path.glob("**/*")
if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
]
elif hasattr(dicom_directory, "__iter__"):
dicom_file_list = []
for dicom_dir in dicom_directory:
# Get all the DICOM files in each directory
root_path = pathlib.Path(dicom_dir)
# Find files ending with .dcm, .dc3
dicom_file_list += [
p
for p in root_path.glob("**/*")
if p.name.lower().endswith(".dcm") or p.name.lower().endswith(".dc3")
]
if len(dicom_file_list) == 0:
logger.info("No DICOM files found in input directory. Exiting now.")
return
# Process the DICOM files
# This returns a dictionary (of dictionaries):
# {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],
# {series_UID_2: [list_of_DICOM_files], ...
# parent_data_2 : {series_UID_1: [list_of_DICOM_files],
# {series_UID_2: [list_of_DICOM_files], ...
# ... }
dicom_series_dict_parent = process_dicom_file_list(
dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose
)
if dicom_series_dict_parent is None:
logger.info("No valid DICOM files found. Ending.")
return None
output = {}
for parent_data, dicom_series_dict in dicom_series_dict_parent.items():
logger.info(f"Processing data for {parent_sorting_field} = {parent_data}.")
logger.info(f" Number of DICOM series = {len(dicom_series_dict.keys())}")
# Set up the output data
# This stores the SimpleITK images and file names
output_data_dict = {}
# Set up the study UID dict
# This helps match structure sets to relevant images
# And paired images to each other (e.g. PET/CT)
study_uid_dict = {}
# Give some user feedback
logger.debug(f" Output image name format: {output_image_name_format}")
logger.debug(f" Output structure name format: {output_structure_name_format}")
logger.debug(f" Output dose name format: {output_dose_name_format}")
# For each unique series UID, process the DICOM files
for series_uid in dicom_series_dict.keys():
# This function returns four values
# 1. dicom_type: This is IMAGES, STRUCTURES, DOSES, etc
# 2. dicom_file_metadata: Some special metadata extracted from the DICOM header
# 3. initial_dicom: The first DICOM in the series. For doses and structures there is
# (usually) only one DICOM anyway
# 4. dicom_file_data: The actual SimpleITK image data
for (
dicom_type,
dicom_file_metadata,
initial_dicom,
dicom_file_data,
) in process_dicom_series(
dicom_series_dict=dicom_series_dict,
series_uid=series_uid,
parent_sorting_field=parent_sorting_field,
return_extra=return_extra,
initial_sop_class_name_default=initial_sop_class_name_default,
):
# Step 1
# Check the parent sorting field is consistent
# This would usually be the PatientName, PatientID, or similar
# Occasionally these will both be blank
parent_sorting_data = dicom_file_metadata["parent_sorting_data"]
if "parent_sorting_data" not in output_data_dict.keys():
output_data_dict["parent_sorting_data"] = parent_sorting_data
else:
if parent_sorting_data != output_data_dict["parent_sorting_data"]:
logger.error(
f"A conflict was found for the parent sorting field "
f"({parent_sorting_field}): {parent_sorting_data}"
)
logger.error("Quitting now.")
                        logger.error(f"Known parents: {list(dicom_series_dict_parent.keys())}")
sys.exit()
else:
logger.info(
f" Parent sorting field ({parent_sorting_field}) match found: "
f"{parent_sorting_data}"
)
# Step 2
# Get the study UID
# Used for indexing DICOM series
study_uid = dicom_file_metadata["study_uid"]
if study_uid not in study_uid_dict.keys():
try:
study_uid_index = max(study_uid_dict.values()) + 1
except AttributeError:
study_uid_index = 0 # Study UID dict might not exist
except ValueError:
study_uid_index = 0 # Study UID dict might be empty
logger.info(f" Setting study instance UID index: {study_uid_index}")
study_uid_dict[study_uid] = study_uid_index
else:
logger.info(
f" Study instance UID index already exists: {study_uid_dict[study_uid]}"
)
# Step 3
# Generate names for output files
# Special names
# ! This can be defined once at the start of the function
special_name_fields = [
"parent_sorting_data",
"study_uid_index",
"image_desc",
"structure_name",
]
# Get the image description (other special names are already defined above)
image_desc = get_dicom_info_from_description(
initial_dicom, return_extra=return_extra
)
# Get all the fields from the user-given name format
if dicom_type == "IMAGES":
all_naming_fields = [
i[i.find("{") + 1 :]
for i in output_image_name_format.split("}")
if len(i) > 0
]
elif dicom_type == "STRUCTURES":
all_naming_fields = [
i[i.find("{") + 1 :]
for i in output_structure_name_format.split("}")
if len(i) > 0
]
elif dicom_type == "DOSES":
all_naming_fields = [
i[i.find("{") + 1 :]
for i in output_dose_name_format.split("}")
if len(i) > 0
]
# Now exclude those that aren't derived from the DICOM header
dicom_header_tags = [i for i in all_naming_fields if i not in special_name_fields]
naming_info_dict = {}
for dicom_field in dicom_header_tags:
try:
dicom_field_value = initial_dicom[dicom_field].value
except (AttributeError, KeyError):
logger.warning(
f" Could not find DICOM header {dicom_field}. Setting as 0 to "
f"preserve naming convention."
)
dicom_field_value = 0
naming_info_dict[dicom_field] = dicom_field_value
if dicom_type == "IMAGES":
output_name = output_image_name_format.format(
parent_sorting_data=parent_sorting_data,
study_uid_index=study_uid_dict[study_uid],
image_desc=image_desc,
**naming_info_dict,
)
if "IMAGES" not in output_data_dict.keys():
# Make a new entry
output_data_dict["IMAGES"] = {output_name: dicom_file_data}
else:
# First check if there is another image of the same name
if output_name not in output_data_dict["IMAGES"].keys():
output_data_dict["IMAGES"][output_name] = dicom_file_data
else:
logger.info(" An image with this name exists, appending.")
if hasattr(output_data_dict["IMAGES"][output_name], "__iter__"):
output_data_dict["IMAGES"][output_name] = list(
[output_data_dict["IMAGES"][output_name]]
)
output_data_dict["IMAGES"][output_name].append(dicom_file_data)
elif dicom_type == "STRUCTURES":
for structure_name, structure_image in zip(
dicom_file_metadata["structure_name_list"], dicom_file_data
):
output_name = output_structure_name_format.format(
parent_sorting_data=parent_sorting_data,
study_uid_index=study_uid_dict[study_uid],
image_desc=image_desc,
structure_name=structure_name,
**naming_info_dict,
)
if "STRUCTURES" not in output_data_dict.keys():
# Make a new entry
output_data_dict["STRUCTURES"] = {output_name: structure_image}
else:
# First check if there is another structure of the same name
if output_name not in output_data_dict["STRUCTURES"].keys():
output_data_dict["STRUCTURES"][output_name] = structure_image
else:
logger.info(" A structure with this name exists, appending.")
                                # A lone mask must be wrapped in a list before appending
                                if isinstance(
                                    output_data_dict["STRUCTURES"][output_name], sitk.Image
                                ):
                                    output_data_dict["STRUCTURES"][output_name] = [
                                        output_data_dict["STRUCTURES"][output_name]
                                    ]
output_data_dict["STRUCTURES"][output_name].append(structure_image)
elif dicom_type == "DOSES":
output_name = output_dose_name_format.format(
parent_sorting_data=parent_sorting_data,
study_uid_index=study_uid_dict[study_uid],
**naming_info_dict,
)
if "DOSES" not in output_data_dict.keys():
# Make a new entry
output_data_dict["DOSES"] = {output_name: dicom_file_data}
else:
# First check if there is another image of the same name
if output_name not in output_data_dict["DOSES"].keys():
output_data_dict["DOSES"][output_name] = dicom_file_data
else:
logger.info(" An image with this name exists, appending.")
if isinstance(output_data_dict["DOSES"][output_name], sitk.Image):
output_data_dict["DOSES"][output_name] = list(
[output_data_dict["DOSES"][output_name]]
)
output_data_dict["DOSES"][output_name].append(dicom_file_data)
if write_to_disk:
output[str(parent_data)] = write_output_data_to_disk(
output_data_dict=output_data_dict,
output_directory=output_directory,
output_file_suffix=output_file_suffix,
overwrite_existing_files=overwrite_existing_files,
)
else:
output[str(parent_data)] = output_data_dict
"""
TO DO!
Memory issue with output_data_dict
Use in inner loop, reset output_data_dict
"""
return output
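# Illustrative top-level usage (hypothetical paths): crawl a DICOM tree and
# write per-patient NIFTI images, structures and doses under ./converted.
if __name__ == "__main__":
    process_dicom_directory(
        "./dicom_data",
        output_directory="./converted",
        overwrite_existing_files=False,
    )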
| 39.28259
| 119
| 0.573898
|
790c024a0670060ed0c26e7ddfda4cb6652205e8
| 10,808
|
py
|
Python
|
xlnet-master/train_gpu.py
|
zouning68/nlp_transfer_learning
|
e5010c5022c6cb0944cdbaaee402fd6d918fad3f
|
[
"Apache-2.0"
] | 6,293
|
2019-06-19T23:29:13.000Z
|
2022-03-31T13:07:52.000Z
|
xlnet-master/train_gpu.py
|
zouning68/nlp_transfer_learning
|
e5010c5022c6cb0944cdbaaee402fd6d918fad3f
|
[
"Apache-2.0"
] | 267
|
2019-06-20T00:25:13.000Z
|
2022-02-06T14:09:02.000Z
|
xlnet-master/train_gpu.py
|
zouning68/nlp_transfer_learning
|
e5010c5022c6cb0944cdbaaee402fd6d918fad3f
|
[
"Apache-2.0"
] | 1,256
|
2019-06-20T01:13:22.000Z
|
2022-03-28T07:18:24.000Z
|
"""Pretraining on GPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
import math
import json
import time
import numpy as np
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import data_utils
import model_utils
from gpu_utils import assign_to_gpu, average_grads_and_vars
import function_builder
# GPU config
flags.DEFINE_integer("num_hosts", default=1,
help="Number of hosts")
flags.DEFINE_integer("num_core_per_host", default=8,
help="Number of cores per host")
flags.DEFINE_bool("use_tpu", default=False,
help="Whether to use TPUs for training.")
# Experiment (data/checkpoint/directory) config
flags.DEFINE_integer("num_passes", default=1,
help="Number of passed used for training.")
flags.DEFINE_string("record_info_dir", default=None,
help="Path to local directory containing `record_info-lm.json`.")
flags.DEFINE_string("model_dir", default=None,
help="Estimator model_dir.")
flags.DEFINE_string("init_checkpoint", default=None,
help="checkpoint path for initializing the model.")
# Optimization config
flags.DEFINE_float("learning_rate", default=1e-4,
help="Maximum learning rate.")
flags.DEFINE_float("clip", default=1.0,
help="Gradient clipping value.")
# for cosine decay
flags.DEFINE_float("min_lr_ratio", default=0.001,
help="Minimum ratio learning rate.")
flags.DEFINE_integer("warmup_steps", default=0,
help="Number of steps for linear lr warmup.")
flags.DEFINE_float("adam_epsilon", default=1e-8,
help="Adam epsilon")
flags.DEFINE_string("decay_method", default="poly",
help="poly or cos")
flags.DEFINE_float("weight_decay", default=0.0,
help="weight decay")
# Training config
flags.DEFINE_integer("train_batch_size", default=16,
help="Size of train batch.")
flags.DEFINE_integer("train_steps", default=100000,
help="Total number of training steps.")
flags.DEFINE_integer("iterations", default=1000,
help="Number of iterations per repeat loop.")
flags.DEFINE_integer("save_steps", default=None,
help="number of steps for model checkpointing.")
# Data config
flags.DEFINE_integer('seq_len', default=0,
help='Sequence length for pretraining.')
flags.DEFINE_integer('reuse_len', default=0,
help="How many tokens to be reused in the next batch. "
"Could be half of seq_len")
flags.DEFINE_bool("bi_data", default=True,
help="Use bidirectional data streams, i.e., forward & backward.")
flags.DEFINE_integer("mask_alpha", default=6,
help="How many tokens to form a group.")
flags.DEFINE_integer("mask_beta", default=1,
help="How many tokens to mask within each group.")
flags.DEFINE_integer("num_predict", default=None,
help="Number of tokens to predict in partial prediction.")
flags.DEFINE_integer('perm_size', default=None,
help='perm size.')
flags.DEFINE_bool("uncased", False,
help="Use uncased inputs or not.")
flags.DEFINE_integer("n_token", 32000, help="Vocab size")
# Model config
flags.DEFINE_integer("mem_len", default=0,
help="Number of steps to cache")
flags.DEFINE_bool("same_length", default=False,
help="Same length attention")
flags.DEFINE_integer("clamp_len", default=-1,
help="Clamp length")
flags.DEFINE_integer("n_layer", default=6,
help="Number of layers.")
flags.DEFINE_integer("d_model", default=32,
help="Dimension of the model.")
flags.DEFINE_integer("d_embed", default=32,
help="Dimension of the embeddings.")
flags.DEFINE_integer("n_head", default=4,
help="Number of attention heads.")
flags.DEFINE_integer("d_head", default=8,
help="Dimension of each attention head.")
flags.DEFINE_integer("d_inner", default=32,
help="Dimension of inner hidden size in positionwise feed-forward.")
flags.DEFINE_float("dropout", default=0.0,
help="Dropout rate.")
flags.DEFINE_float("dropatt", default=0.0,
help="Attention dropout rate.")
flags.DEFINE_bool("untie_r", default=False,
help="Untie r_w_bias and r_r_bias")
flags.DEFINE_string("summary_type", default="last",
help="Method used to summarize a sequence into a compact vector.")
flags.DEFINE_string("ff_activation", default="relu",
help="Activation type used in position-wise feed-forward.")
flags.DEFINE_bool("use_bfloat16", False,
help="Whether to use bfloat16.")
# Parameter initialization
flags.DEFINE_enum("init", default="normal",
enum_values=["normal", "uniform"],
help="Initialization method.")
flags.DEFINE_float("init_std", default=0.02,
help="Initialization std when init is normal.")
flags.DEFINE_float("init_range", default=0.1,
help="Initialization std when init is uniform.")
FLAGS = flags.FLAGS
def get_model_fn():
def model_fn(features, labels, mems, is_training):
#### Get loss from inputs
total_loss, new_mems, monitor_dict = function_builder.get_loss(
FLAGS, features, labels, mems, is_training)
#### Check model parameters
num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
tf.logging.info('#params: {}'.format(num_params))
# GPU
assert is_training
all_vars = tf.trainable_variables()
grads = tf.gradients(total_loss, all_vars)
grads_and_vars = list(zip(grads, all_vars))
return total_loss, new_mems, grads_and_vars
return model_fn
def single_core_graph(is_training, features, mems):
model_fn = get_model_fn()
model_ret = model_fn(
features=features,
labels=None,
mems=mems,
is_training=is_training)
return model_ret
def create_mems_tf(bsz_per_core):
mems = [tf.placeholder(dtype=tf.float32,
shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model])
for layer in range(FLAGS.n_layer)]
return mems
def initialize_mems_np(bsz_per_core):
mems_np = [np.zeros(shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model],
dtype=np.float32)
for layer in range(FLAGS.n_layer)]
return mems_np
def train(ps_device):
##### Get input function and model function
train_input_fn, record_info_dict = data_utils.get_input_fn(
tfrecord_dir=FLAGS.record_info_dir,
split="train",
bsz_per_host=FLAGS.train_batch_size,
seq_len=FLAGS.seq_len,
reuse_len=FLAGS.reuse_len,
bi_data=FLAGS.bi_data,
num_hosts=1,
num_core_per_host=1, # set to one no matter how many GPUs
perm_size=FLAGS.perm_size,
mask_alpha=FLAGS.mask_alpha,
mask_beta=FLAGS.mask_beta,
uncased=FLAGS.uncased,
num_passes=FLAGS.num_passes,
use_bfloat16=FLAGS.use_bfloat16,
num_predict=FLAGS.num_predict)
# for key, info in record_info_dict.items():
tf.logging.info("num of batches {}".format(record_info_dict["num_batch"]))
##### Create input tensors / placeholders
bsz_per_core = FLAGS.train_batch_size // FLAGS.num_core_per_host
params = {
"batch_size": FLAGS.train_batch_size # the whole batch
}
train_set = train_input_fn(params)
example = train_set.make_one_shot_iterator().get_next()
if FLAGS.num_core_per_host > 1:
examples = [{} for _ in range(FLAGS.num_core_per_host)]
for key in example.keys():
vals = tf.split(example[key], FLAGS.num_core_per_host, 0)
for device_id in range(FLAGS.num_core_per_host):
examples[device_id][key] = vals[device_id]
else:
examples = [example]
##### Create computational graph
tower_mems, tower_losses, tower_new_mems, tower_grads_and_vars = [], [], [], []
for i in range(FLAGS.num_core_per_host):
reuse = True if i > 0 else None
with tf.device(assign_to_gpu(i, ps_device)), \
tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
# The mems for each tower is a dictionary
mems_i = {}
if FLAGS.mem_len:
mems_i["mems"] = create_mems_tf(bsz_per_core)
loss_i, new_mems_i, grads_and_vars_i = single_core_graph(
is_training=True,
features=examples[i],
mems=mems_i)
tower_mems.append(mems_i)
tower_losses.append(loss_i)
tower_new_mems.append(new_mems_i)
tower_grads_and_vars.append(grads_and_vars_i)
## average losses and gradients across towers
if len(tower_losses) > 1:
loss = tf.add_n(tower_losses) / len(tower_losses)
grads_and_vars = average_grads_and_vars(tower_grads_and_vars)
else:
loss = tower_losses[0]
grads_and_vars = tower_grads_and_vars[0]
## get train op
train_op, learning_rate, gnorm = model_utils.get_train_op(FLAGS, None,
grads_and_vars=grads_and_vars)
global_step = tf.train.get_global_step()
##### Training loop
# initialize mems
tower_mems_np = []
for i in range(FLAGS.num_core_per_host):
mems_i_np = {}
for key in tower_mems[i].keys():
mems_i_np[key] = initialize_mems_np(bsz_per_core)
tower_mems_np.append(mems_i_np)
saver = tf.train.Saver()
gpu_options = tf.GPUOptions(allow_growth=True)
model_utils.init_from_checkpoint(FLAGS, global_vars=True)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=gpu_options)) as sess:
sess.run(tf.global_variables_initializer())
fetches = [loss, tower_new_mems, global_step, gnorm, learning_rate, train_op]
total_loss, prev_step = 0., -1
while True:
feed_dict = {}
for i in range(FLAGS.num_core_per_host):
for key in tower_mems_np[i].keys():
for m, m_np in zip(tower_mems[i][key], tower_mems_np[i][key]):
feed_dict[m] = m_np
fetched = sess.run(fetches, feed_dict=feed_dict)
loss_np, tower_mems_np, curr_step = fetched[:3]
total_loss += loss_np
if curr_step > 0 and curr_step % FLAGS.iterations == 0:
curr_loss = total_loss / (curr_step - prev_step)
tf.logging.info("[{}] | gnorm {:.2f} lr {:8.6f} "
"| loss {:.2f} | pplx {:>7.2f}, bpc {:>7.4f}".format(
curr_step, fetched[-3], fetched[-2],
curr_loss, math.exp(curr_loss), curr_loss / math.log(2)))
total_loss, prev_step = 0., curr_step
if curr_step > 0 and curr_step % FLAGS.save_steps == 0:
save_path = os.path.join(FLAGS.model_dir, "model.ckpt")
saver.save(sess, save_path)
tf.logging.info("Model saved in path: {}".format(save_path))
if curr_step >= FLAGS.train_steps:
break
def main(unused_argv):
del unused_argv # Unused
tf.logging.set_verbosity(tf.logging.INFO)
# Get corpus info
FLAGS.n_token = data_utils.VOCAB_SIZE
tf.logging.info("n_token {}".format(FLAGS.n_token))
if not tf.gfile.Exists(FLAGS.model_dir):
tf.gfile.MakeDirs(FLAGS.model_dir)
train("/gpu:0")
if __name__ == "__main__":
tf.app.run()
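# Illustrative launch note (an addition, not part of the original script);
# only flags defined above are used, and all paths are placeholders:
#   python train_gpu.py \
#       --record_info_dir=/path/to/tfrecords \
#       --model_dir=/path/to/checkpoints \
#       --train_batch_size=16 --num_core_per_host=1 \
#       --seq_len=128 --reuse_len=64 --perm_size=64 \
#       --mem_len=192 --save_steps=10000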
| 32.851064
| 81
| 0.70383
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
import math
import json
import time
import numpy as np
from absl import flags
import absl.logging as _logging
import tensorflow as tf
import data_utils
import model_utils
from gpu_utils import assign_to_gpu, average_grads_and_vars
import function_builder
flags.DEFINE_integer("num_hosts", default=1,
help="Number of hosts")
flags.DEFINE_integer("num_core_per_host", default=8,
help="Number of cores per host")
flags.DEFINE_bool("use_tpu", default=False,
help="Whether to use TPUs for training.")
flags.DEFINE_integer("num_passes", default=1,
help="Number of passed used for training.")
flags.DEFINE_string("record_info_dir", default=None,
help="Path to local directory containing `record_info-lm.json`.")
flags.DEFINE_string("model_dir", default=None,
help="Estimator model_dir.")
flags.DEFINE_string("init_checkpoint", default=None,
help="checkpoint path for initializing the model.")
flags.DEFINE_float("learning_rate", default=1e-4,
help="Maximum learning rate.")
flags.DEFINE_float("clip", default=1.0,
help="Gradient clipping value.")
flags.DEFINE_float("min_lr_ratio", default=0.001,
help="Minimum ratio learning rate.")
flags.DEFINE_integer("warmup_steps", default=0,
help="Number of steps for linear lr warmup.")
flags.DEFINE_float("adam_epsilon", default=1e-8,
help="Adam epsilon")
flags.DEFINE_string("decay_method", default="poly",
help="poly or cos")
flags.DEFINE_float("weight_decay", default=0.0,
help="weight decay")
flags.DEFINE_integer("train_batch_size", default=16,
help="Size of train batch.")
flags.DEFINE_integer("train_steps", default=100000,
help="Total number of training steps.")
flags.DEFINE_integer("iterations", default=1000,
help="Number of iterations per repeat loop.")
flags.DEFINE_integer("save_steps", default=None,
help="number of steps for model checkpointing.")
flags.DEFINE_integer('seq_len', default=0,
help='Sequence length for pretraining.')
flags.DEFINE_integer('reuse_len', default=0,
help="How many tokens to be reused in the next batch. "
"Could be half of seq_len")
flags.DEFINE_bool("bi_data", default=True,
help="Use bidirectional data streams, i.e., forward & backward.")
flags.DEFINE_integer("mask_alpha", default=6,
help="How many tokens to form a group.")
flags.DEFINE_integer("mask_beta", default=1,
help="How many tokens to mask within each group.")
flags.DEFINE_integer("num_predict", default=None,
help="Number of tokens to predict in partial prediction.")
flags.DEFINE_integer('perm_size', default=None,
help='perm size.')
flags.DEFINE_bool("uncased", False,
help="Use uncased inputs or not.")
flags.DEFINE_integer("n_token", 32000, help="Vocab size")
flags.DEFINE_integer("mem_len", default=0,
help="Number of steps to cache")
flags.DEFINE_bool("same_length", default=False,
help="Same length attention")
flags.DEFINE_integer("clamp_len", default=-1,
help="Clamp length")
flags.DEFINE_integer("n_layer", default=6,
help="Number of layers.")
flags.DEFINE_integer("d_model", default=32,
help="Dimension of the model.")
flags.DEFINE_integer("d_embed", default=32,
help="Dimension of the embeddings.")
flags.DEFINE_integer("n_head", default=4,
help="Number of attention heads.")
flags.DEFINE_integer("d_head", default=8,
help="Dimension of each attention head.")
flags.DEFINE_integer("d_inner", default=32,
help="Dimension of inner hidden size in positionwise feed-forward.")
flags.DEFINE_float("dropout", default=0.0,
help="Dropout rate.")
flags.DEFINE_float("dropatt", default=0.0,
help="Attention dropout rate.")
flags.DEFINE_bool("untie_r", default=False,
help="Untie r_w_bias and r_r_bias")
flags.DEFINE_string("summary_type", default="last",
help="Method used to summarize a sequence into a compact vector.")
flags.DEFINE_string("ff_activation", default="relu",
help="Activation type used in position-wise feed-forward.")
flags.DEFINE_bool("use_bfloat16", False,
help="Whether to use bfloat16.")
flags.DEFINE_enum("init", default="normal",
enum_values=["normal", "uniform"],
help="Initialization method.")
flags.DEFINE_float("init_std", default=0.02,
help="Initialization std when init is normal.")
flags.DEFINE_float("init_range", default=0.1,
help="Initialization std when init is uniform.")
FLAGS = flags.FLAGS
def get_model_fn():
def model_fn(features, labels, mems, is_training):
    total_loss, new_mems, monitor_dict = function_builder.get_loss(
        FLAGS, features, labels, mems, is_training)
    num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
tf.logging.info('#params: {}'.format(num_params))
assert is_training
all_vars = tf.trainable_variables()
grads = tf.gradients(total_loss, all_vars)
grads_and_vars = list(zip(grads, all_vars))
return total_loss, new_mems, grads_and_vars
return model_fn
def single_core_graph(is_training, features, mems):
model_fn = get_model_fn()
model_ret = model_fn(
features=features,
labels=None,
mems=mems,
is_training=is_training)
return model_ret
def create_mems_tf(bsz_per_core):
mems = [tf.placeholder(dtype=tf.float32,
shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model])
for layer in range(FLAGS.n_layer)]
return mems
def initialize_mems_np(bsz_per_core):
mems_np = [np.zeros(shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model],
dtype=np.float32)
for layer in range(FLAGS.n_layer)]
return mems_np
def train(ps_device):
  train_input_fn, record_info_dict = data_utils.get_input_fn(
      tfrecord_dir=FLAGS.record_info_dir,
      split="train",
      bsz_per_host=FLAGS.train_batch_size,
seq_len=FLAGS.seq_len,
reuse_len=FLAGS.reuse_len,
bi_data=FLAGS.bi_data,
num_hosts=1,
num_core_per_host=1,
perm_size=FLAGS.perm_size,
mask_alpha=FLAGS.mask_alpha,
mask_beta=FLAGS.mask_beta,
uncased=FLAGS.uncased,
num_passes=FLAGS.num_passes,
use_bfloat16=FLAGS.use_bfloat16,
num_predict=FLAGS.num_predict)
tf.logging.info("num of batches {}".format(record_info_dict["num_batch"]))
  bsz_per_core = FLAGS.train_batch_size // FLAGS.num_core_per_host
  params = {
      "batch_size": FLAGS.train_batch_size
  }
  train_set = train_input_fn(params)
example = train_set.make_one_shot_iterator().get_next()
if FLAGS.num_core_per_host > 1:
examples = [{} for _ in range(FLAGS.num_core_per_host)]
for key in example.keys():
vals = tf.split(example[key], FLAGS.num_core_per_host, 0)
for device_id in range(FLAGS.num_core_per_host):
examples[device_id][key] = vals[device_id]
else:
examples = [example]
  tower_mems, tower_losses, tower_new_mems, tower_grads_and_vars = [], [], [], []
  for i in range(FLAGS.num_core_per_host):
reuse = True if i > 0 else None
with tf.device(assign_to_gpu(i, ps_device)), \
tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
mems_i = {}
if FLAGS.mem_len:
mems_i["mems"] = create_mems_tf(bsz_per_core)
loss_i, new_mems_i, grads_and_vars_i = single_core_graph(
is_training=True,
features=examples[i],
mems=mems_i)
tower_mems.append(mems_i)
tower_losses.append(loss_i)
tower_new_mems.append(new_mems_i)
tower_grads_and_vars.append(grads_and_vars_i)
  if len(tower_losses) > 1:
    loss = tf.add_n(tower_losses) / len(tower_losses)
grads_and_vars = average_grads_and_vars(tower_grads_and_vars)
else:
loss = tower_losses[0]
grads_and_vars = tower_grads_and_vars[0]
  train_op, learning_rate, gnorm = model_utils.get_train_op(FLAGS, None,
      grads_and_vars=grads_and_vars)
grads_and_vars=grads_and_vars)
global_step = tf.train.get_global_step()
  tower_mems_np = []
  for i in range(FLAGS.num_core_per_host):
mems_i_np = {}
for key in tower_mems[i].keys():
mems_i_np[key] = initialize_mems_np(bsz_per_core)
tower_mems_np.append(mems_i_np)
saver = tf.train.Saver()
gpu_options = tf.GPUOptions(allow_growth=True)
model_utils.init_from_checkpoint(FLAGS, global_vars=True)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=gpu_options)) as sess:
sess.run(tf.global_variables_initializer())
fetches = [loss, tower_new_mems, global_step, gnorm, learning_rate, train_op]
total_loss, prev_step = 0., -1
while True:
feed_dict = {}
for i in range(FLAGS.num_core_per_host):
for key in tower_mems_np[i].keys():
for m, m_np in zip(tower_mems[i][key], tower_mems_np[i][key]):
feed_dict[m] = m_np
fetched = sess.run(fetches, feed_dict=feed_dict)
loss_np, tower_mems_np, curr_step = fetched[:3]
total_loss += loss_np
if curr_step > 0 and curr_step % FLAGS.iterations == 0:
curr_loss = total_loss / (curr_step - prev_step)
tf.logging.info("[{}] | gnorm {:.2f} lr {:8.6f} "
"| loss {:.2f} | pplx {:>7.2f}, bpc {:>7.4f}".format(
curr_step, fetched[-3], fetched[-2],
curr_loss, math.exp(curr_loss), curr_loss / math.log(2)))
total_loss, prev_step = 0., curr_step
if curr_step > 0 and curr_step % FLAGS.save_steps == 0:
save_path = os.path.join(FLAGS.model_dir, "model.ckpt")
saver.save(sess, save_path)
tf.logging.info("Model saved in path: {}".format(save_path))
if curr_step >= FLAGS.train_steps:
break
def main(unused_argv):
del unused_argv
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS.n_token = data_utils.VOCAB_SIZE
tf.logging.info("n_token {}".format(FLAGS.n_token))
if not tf.gfile.Exists(FLAGS.model_dir):
tf.gfile.MakeDirs(FLAGS.model_dir)
train("/gpu:0")
if __name__ == "__main__":
tf.app.run()
| true
| true
|
790c0365b6a5328cccebb59c901eebb402383e63
| 2,608
|
py
|
Python
|
src/models/dnn_regressor.py
|
onurerkin/prohack
|
51665841de04de4a7d44a3aeacec8e9142110cea
|
[
"MIT"
] | null | null | null |
src/models/dnn_regressor.py
|
onurerkin/prohack
|
51665841de04de4a7d44a3aeacec8e9142110cea
|
[
"MIT"
] | null | null | null |
src/models/dnn_regressor.py
|
onurerkin/prohack
|
51665841de04de4a7d44a3aeacec8e9142110cea
|
[
"MIT"
] | null | null | null |
from typing import Tuple, Union
import numpy as np
import pandas as pd
import tensorflow as tf
from src.models.dnn_regressor_funcs import (
_compile_model,
_create_keras_model,
_fit_model,
_to_input_list,
)
def predict(model: tf.keras.Model, X_test: pd.DataFrame, cate_cols: list) -> np.array:
"""
predict function
Args:
model: keras model fit by fit_model
X_test: Test features
cate_cols: categorical columns list
Returns: y_pred
"""
X_test_list = _to_input_list(df=X_test, cate_cols=cate_cols)
y_pred = model.predict(X_test_list)
return y_pred
def train(
X_train: pd.DataFrame,
y_train: Union[pd.Series, np.array],
X_val: pd.DataFrame,
y_val: Union[pd.Series, np.array],
layers: list,
num_classes: int,
cate_cols: list,
learning_rate: float,
epochs: int,
batch_size: int,
dropout_rate: float = 0.3,
) -> Tuple[tf.keras.callbacks.History, tf.keras.Model]:
"""
Training main function that takes dataset and parameters as input and returns the trained model with history
Args:
X_train: Train features
y_train: train labels
        X_val: Validation features
y_val: validation labels
layers: List of nodes in hidden layers
num_classes: Number of classes in target variable
cate_cols: categorical columns list
learning_rate: learning rate
epochs: number of epochs
batch_size: batch size
dropout_rate: dropout rate
Returns: history of training, trained model
"""
X_train_list = _to_input_list(df=X_train, cate_cols=cate_cols)
X_val_list = _to_input_list(df=X_val, cate_cols=cate_cols)
# if len(y_train.shape) == 1:
# y_train_categorical = tf.keras.utils.to_categorical(
# y_train, num_classes=num_classes, dtype="float32"
# )
#
# y_val_categorical = tf.keras.utils.to_categorical(
# y_val, num_classes=num_classes, dtype="float32"
# )
y_train = np.array(y_train)
y_val = np.array(y_val)
model = _create_keras_model(
X_train=X_train,
layers=layers,
num_classes=num_classes,
dropout_rate=dropout_rate,
cate_cols=cate_cols,
)
_compile_model(model=model, num_classes=num_classes, learning_rate=learning_rate)
history = _fit_model(
model=model,
X_train_list=X_train_list,
y_train=y_train,
X_val_list=X_val_list,
y_val=y_val,
epochs=epochs,
batch_size=batch_size,
)
return history, model
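# Minimal usage sketch (an illustrative addition; column names and
# hyperparameters below are placeholders, not from the original module):
#   history, model = train(
#       X_train, y_train, X_val, y_val,
#       layers=[64, 32], num_classes=1, cate_cols=["galaxy"],
#       learning_rate=1e-3, epochs=20, batch_size=32,
#   )
#   y_pred = predict(model, X_test, cate_cols=["galaxy"])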
| 26.612245
| 112
| 0.662577
|
from typing import Tuple, Union
import numpy as np
import pandas as pd
import tensorflow as tf
from src.models.dnn_regressor_funcs import (
_compile_model,
_create_keras_model,
_fit_model,
_to_input_list,
)
def predict(model: tf.keras.Model, X_test: pd.DataFrame, cate_cols: list) -> np.array:
X_test_list = _to_input_list(df=X_test, cate_cols=cate_cols)
y_pred = model.predict(X_test_list)
return y_pred
def train(
X_train: pd.DataFrame,
y_train: Union[pd.Series, np.array],
X_val: pd.DataFrame,
y_val: Union[pd.Series, np.array],
layers: list,
num_classes: int,
cate_cols: list,
learning_rate: float,
epochs: int,
batch_size: int,
dropout_rate: float = 0.3,
) -> Tuple[tf.keras.callbacks.History, tf.keras.Model]:
X_train_list = _to_input_list(df=X_train, cate_cols=cate_cols)
X_val_list = _to_input_list(df=X_val, cate_cols=cate_cols)
y_train = np.array(y_train)
y_val = np.array(y_val)
model = _create_keras_model(
X_train=X_train,
layers=layers,
num_classes=num_classes,
dropout_rate=dropout_rate,
cate_cols=cate_cols,
)
_compile_model(model=model, num_classes=num_classes, learning_rate=learning_rate)
history = _fit_model(
model=model,
X_train_list=X_train_list,
y_train=y_train,
X_val_list=X_val_list,
y_val=y_val,
epochs=epochs,
batch_size=batch_size,
)
return history, model
| true
| true
|
790c0387f5c26bd279ccf1f3bf58e2fc4b20c3bf
| 924
|
py
|
Python
|
tests/pycmd.py
|
JeremyMarshall/pymake
|
29ca1ef4abb8138eb70e7d4da3c4d983b5b0ec00
|
[
"MIT"
] | 2
|
2021-07-11T12:48:11.000Z
|
2022-01-10T13:56:33.000Z
|
tests/pycmd.py
|
fcostin/pymake
|
7f1ea154f7ae592f614225db486dd79cdbe40190
|
[
"MIT"
] | null | null | null |
tests/pycmd.py
|
fcostin/pymake
|
7f1ea154f7ae592f614225db486dd79cdbe40190
|
[
"MIT"
] | 3
|
2019-10-05T00:54:06.000Z
|
2021-03-15T05:09:11.000Z
|
import os, sys, subprocess
def writetofile(args):
with open(args[0], 'w') as f:
f.write(' '.join(args[1:]))
def writeenvtofile(args):
with open(args[0], 'w') as f:
f.write(os.environ[args[1]])
def writesubprocessenvtofile(args):
with open(args[0], 'w') as f:
p = subprocess.Popen([sys.executable, "-c",
"import os; print(os.environ['%s'])" % args[1]],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
assert p.returncode == 0
f.write(stdout.decode("utf-8"))
def convertasplode(arg):
try:
return int(arg)
    except ValueError:
return (None if arg == "None" else arg)
def asplode(args):
arg0 = convertasplode(args[0])
sys.exit(arg0)
def asplode_return(args):
arg0 = convertasplode(args[0])
return arg0
def asplode_raise(args):
raise Exception(args[0])
def delayloadfn(args):
import delayload
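# Hypothetical standalone driver (an illustrative addition; pymake's test
# harness invokes these helpers directly). Dispatches by function name, e.g.:
#   python pycmd.py writetofile out.txt hello world
if __name__ == '__main__':
    fn = globals()[sys.argv[1]]
    sys.exit(fn(sys.argv[2:]))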
| 23.692308
| 74
| 0.627706
|
import os, sys, subprocess
def writetofile(args):
with open(args[0], 'w') as f:
f.write(' '.join(args[1:]))
def writeenvtofile(args):
with open(args[0], 'w') as f:
f.write(os.environ[args[1]])
def writesubprocessenvtofile(args):
with open(args[0], 'w') as f:
p = subprocess.Popen([sys.executable, "-c",
"import os; print(os.environ['%s'])" % args[1]],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
assert p.returncode == 0
f.write(stdout.decode("utf-8"))
def convertasplode(arg):
try:
return int(arg)
    except ValueError:
return (None if arg == "None" else arg)
def asplode(args):
arg0 = convertasplode(args[0])
sys.exit(arg0)
def asplode_return(args):
arg0 = convertasplode(args[0])
return arg0
def asplode_raise(args):
raise Exception(args[0])
def delayloadfn(args):
import delayload
| true
| true
|
790c047d0e094bd72415b297f8d9e7f95d092341
| 367
|
py
|
Python
|
webapp/graphite/events/admin.py
|
drax68/graphite-web
|
c4a26ebb60fda1b15b49f3a284fe9753c3af10dd
|
[
"Apache-2.0"
] | null | null | null |
webapp/graphite/events/admin.py
|
drax68/graphite-web
|
c4a26ebb60fda1b15b49f3a284fe9753c3af10dd
|
[
"Apache-2.0"
] | null | null | null |
webapp/graphite/events/admin.py
|
drax68/graphite-web
|
c4a26ebb60fda1b15b49f3a284fe9753c3af10dd
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from graphite.events.models import Event
class EventsAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('when', 'what', 'data', 'tags',)
}),
)
list_display = ('when', 'what', 'data',)
list_filter = ('what',)
search_fields = ('tags', )
admin.site.register(Event, EventsAdmin)
| 22.9375
| 55
| 0.594005
|
from django.contrib import admin
from graphite.events.models import Event
class EventsAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('when', 'what', 'data', 'tags',)
}),
)
list_display = ('when', 'what', 'data',)
list_filter = ('what',)
search_fields = ('tags', )
admin.site.register(Event, EventsAdmin)
| true
| true
|
790c05788ad1a1b173c818378c6d269007646d2d
| 4,213
|
py
|
Python
|
qmcpy/stopping_criterion/_cub_qmc_ld_g.py
|
QMCSoftware/QMCSoftware
|
dbd774d635eb269e77c48526b980f62c23214617
|
[
"Apache-2.0"
] | 40
|
2019-09-15T03:31:17.000Z
|
2022-02-19T19:52:10.000Z
|
qmcpy/stopping_criterion/_cub_qmc_ld_g.py
|
QMCSoftware/QMCSoftware
|
dbd774d635eb269e77c48526b980f62c23214617
|
[
"Apache-2.0"
] | 152
|
2019-10-06T17:26:02.000Z
|
2022-03-01T04:17:04.000Z
|
qmcpy/stopping_criterion/_cub_qmc_ld_g.py
|
QMCSoftware/QMCSoftware
|
dbd774d635eb269e77c48526b980f62c23214617
|
[
"Apache-2.0"
] | 16
|
2019-09-17T23:33:48.000Z
|
2021-07-19T22:38:45.000Z
|
from ._stopping_criterion import StoppingCriterion
from ..accumulate_data import LDTransformData
from ..util import MaxSamplesWarning, ParameterError, ParameterWarning
from numpy import *
from time import time
import warnings
class CubQMCLDG(StoppingCriterion):
"""
Abstract class for CubQMC{LD}G where LD is a low discrepancy discrete distribution.
See subclasses for implementation differences for each LD sequence.
"""
def __init__(self, integrand, abs_tol, rel_tol, n_init, n_max, fudge, check_cone,
control_variates, control_variate_means, update_beta, ptransform,
coefv, allowed_levels, allowed_distribs, cast_complex):
self.parameters = ['abs_tol','rel_tol','n_init','n_max']
# Input Checks
self.abs_tol = float(abs_tol)
self.rel_tol = float(rel_tol)
m_min = log2(n_init)
m_max = log2(n_max)
if m_min%1 != 0. or m_min < 8. or m_max%1 != 0.:
warning_s = '''
                n_init and n_max must be powers of 2.
n_init must be >= 2^8.
Using n_init = 2^10 and n_max=2^35.'''
warnings.warn(warning_s, ParameterWarning)
m_min = 10.
m_max = 35.
self.n_init = 2.**m_min
self.n_max = 2.**m_max
self.m_min = m_min
self.m_max = m_max
self.fudge = fudge
self.check_cone = check_cone
self.coefv = coefv
self.ptransform = ptransform
self.cast_complex = cast_complex
# QMCPy Objs
self.integrand = integrand
self.true_measure = self.integrand.true_measure
self.discrete_distrib = self.integrand.discrete_distrib
self.cv = control_variates
self.cv_mu = control_variate_means
self.ub = update_beta
# Verify Compliant Construction
super(CubQMCLDG,self).__init__(allowed_levels, allowed_distribs, allow_vectorized_integrals=False)
def integrate(self):
""" See abstract method. """
# Construct AccumulateData Object to House Integration data
self.data = LDTransformData(self, self.integrand, self.true_measure, self.discrete_distrib,
self.coefv, self.m_min, self.m_max, self.fudge, self.check_cone, ptransform=self.ptransform,
cast_complex=self.cast_complex, control_variates=self.cv, control_variate_means=self.cv_mu, update_beta=self.ub)
t_start = time()
while True:
self.data.update_data()
# Check the end of the algorithm
self.data.error_bound = self.data.fudge(self.data.m)*self.data.stilde
# Compute optimal estimator
ub = max(self.abs_tol, self.rel_tol*abs(self.data.solution + self.data.error_bound))
lb = max(self.abs_tol, self.rel_tol*abs(self.data.solution - self.data.error_bound))
self.data.solution = self.data.solution - self.data.error_bound*(ub-lb) / (ub+lb)
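            # ub/lb evaluate the tolerance at the upper/lower confidence
            # endpoints; shifting by error_bound*(ub-lb)/(ub+lb) centers the
            # estimate in the tolerance band, so 2*error_bound <= ub + lb
            # (equivalently the test below) implies the tolerance is met.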
if 4*self.data.error_bound**2./(ub+lb)**2. <= 1.:
# stopping criterion met
break
elif self.data.m == self.data.m_max:
# doubling samples would go over n_max
warning_s = """
                Already generated %d samples.
Trying to generate %d new samples would exceed n_max = %d.
No more samples will be generated.
Note that error tolerances may no longer be satisfied""" \
% (int(2**self.data.m), int(2**self.data.m), int(2**self.data.m_max))
warnings.warn(warning_s, MaxSamplesWarning)
break
else:
# double sample size
self.data.m += 1.
self.data.time_integrate = time() - t_start
return self.data.solution, self.data
def set_tolerance(self, abs_tol=None, rel_tol=None):
"""
See abstract method.
Args:
abs_tol (float): absolute tolerance. Reset if supplied, ignored if not.
rel_tol (float): relative tolerance. Reset if supplied, ignored if not.
"""
        if abs_tol is not None: self.abs_tol = abs_tol
        if rel_tol is not None: self.rel_tol = rel_tol
| 44.347368
| 124
| 0.618562
|
from ._stopping_criterion import StoppingCriterion
from ..accumulate_data import LDTransformData
from ..util import MaxSamplesWarning, ParameterError, ParameterWarning
from numpy import *
from time import time
import warnings
class CubQMCLDG(StoppingCriterion):
def __init__(self, integrand, abs_tol, rel_tol, n_init, n_max, fudge, check_cone,
control_variates, control_variate_means, update_beta, ptransform,
coefv, allowed_levels, allowed_distribs, cast_complex):
self.parameters = ['abs_tol','rel_tol','n_init','n_max']
self.abs_tol = float(abs_tol)
self.rel_tol = float(rel_tol)
m_min = log2(n_init)
m_max = log2(n_max)
if m_min%1 != 0. or m_min < 8. or m_max%1 != 0.:
warning_s = '''
                n_init and n_max must be powers of 2.
n_init must be >= 2^8.
Using n_init = 2^10 and n_max=2^35.'''
warnings.warn(warning_s, ParameterWarning)
m_min = 10.
m_max = 35.
self.n_init = 2.**m_min
self.n_max = 2.**m_max
self.m_min = m_min
self.m_max = m_max
self.fudge = fudge
self.check_cone = check_cone
self.coefv = coefv
self.ptransform = ptransform
self.cast_complex = cast_complex
self.integrand = integrand
self.true_measure = self.integrand.true_measure
self.discrete_distrib = self.integrand.discrete_distrib
self.cv = control_variates
self.cv_mu = control_variate_means
self.ub = update_beta
super(CubQMCLDG,self).__init__(allowed_levels, allowed_distribs, allow_vectorized_integrals=False)
def integrate(self):
self.data = LDTransformData(self, self.integrand, self.true_measure, self.discrete_distrib,
self.coefv, self.m_min, self.m_max, self.fudge, self.check_cone, ptransform=self.ptransform,
cast_complex=self.cast_complex, control_variates=self.cv, control_variate_means=self.cv_mu, update_beta=self.ub)
t_start = time()
while True:
self.data.update_data()
self.data.error_bound = self.data.fudge(self.data.m)*self.data.stilde
ub = max(self.abs_tol, self.rel_tol*abs(self.data.solution + self.data.error_bound))
lb = max(self.abs_tol, self.rel_tol*abs(self.data.solution - self.data.error_bound))
self.data.solution = self.data.solution - self.data.error_bound*(ub-lb) / (ub+lb)
if 4*self.data.error_bound**2./(ub+lb)**2. <= 1.:
break
elif self.data.m == self.data.m_max:
warning_s = """
                Already generated %d samples.
Trying to generate %d new samples would exceed n_max = %d.
No more samples will be generated.
Note that error tolerances may no longer be satisfied""" \
% (int(2**self.data.m), int(2**self.data.m), int(2**self.data.m_max))
warnings.warn(warning_s, MaxSamplesWarning)
break
else:
self.data.m += 1.
self.data.time_integrate = time() - t_start
return self.data.solution, self.data
def set_tolerance(self, abs_tol=None, rel_tol=None):
        if abs_tol is not None: self.abs_tol = abs_tol
        if rel_tol is not None: self.rel_tol = rel_tol
| true
| true
|
790c064b3a857462c7d2f2561d92dc148405fbb6
| 3,445
|
py
|
Python
|
pipe-cli/src/api/user.py
|
NShaforostov/cloud-pipeline
|
8d25b2b5f4838be569d9c25a307b77df5b0e73fc
|
[
"Apache-2.0"
] | null | null | null |
pipe-cli/src/api/user.py
|
NShaforostov/cloud-pipeline
|
8d25b2b5f4838be569d9c25a307b77df5b0e73fc
|
[
"Apache-2.0"
] | 1
|
2019-03-25T10:32:44.000Z
|
2019-03-27T17:45:19.000Z
|
pipe-cli/src/api/user.py
|
NShaforostov/cloud-pipeline
|
8d25b2b5f4838be569d9c25a307b77df5b0e73fc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.api.entity import Entity
from .base import API
import json
from ..model.object_permission_model import ObjectPermissionModel
class User(API):
def __init__(self):
super(User, self).__init__()
@classmethod
def get_permissions(cls, identifier, acl_class):
entity = Entity.load_by_id_or_name(identifier, acl_class)
return cls.permissions(entity['id'], entity['aclClass']), entity['owner']
@classmethod
def permissions(cls, id, acl_class):
api = cls.instance()
response_data = api.call('permissions?id={}&aclClass={}'.format(id, acl_class.upper()), None)
if 'payload' in response_data and 'permissions' in response_data['payload']:
permissions = []
for permission_json in response_data['payload']['permissions']:
permission_object = ObjectPermissionModel.load(permission_json)
permission_object.parse_mask(True)
permissions.append(permission_object)
return permissions
else:
return []
@classmethod
def grant_permission(cls, identifier, acl_class, user_name, principal, mask):
api = cls.instance()
payload = {}
if acl_class is not None:
payload['aclClass'] = acl_class.upper()
if identifier is not None:
payload['id'] = identifier
if mask is not None:
payload['mask'] = mask
if principal is not None:
payload['principal'] = principal
if user_name is not None:
payload['userName'] = user_name
data = json.dumps(payload)
api.call('grant', data)
@classmethod
def change_owner(cls, user_name, class_name, object_id):
api = cls.instance()
response_data = api.call('/grant/owner?userName={}&aclClass={}&id={}'.format(
user_name, str(class_name).upper(), object_id), None, http_method='POST')
if 'payload' in response_data and 'entity' in response_data['payload']:
return response_data['payload']['entity']
if 'message' in response_data:
raise RuntimeError(response_data['message'])
else:
raise RuntimeError("Failed to change owner.")
@classmethod
def generate_user_token(cls, user_name, duration):
api = cls.instance()
query = '/user/token?name=%s' % user_name
if duration:
query = '&expiration='.join([query, str(duration)])
response_data = api.call(query, None)
if 'payload' in response_data and 'token' in response_data['payload']:
return response_data['payload']['token']
if 'message' in response_data:
raise RuntimeError(response_data['message'])
else:
raise RuntimeError("Failed to generate user token.")
| 40.529412
| 101
| 0.649057
|
from src.api.entity import Entity
from .base import API
import json
from ..model.object_permission_model import ObjectPermissionModel
class User(API):
def __init__(self):
super(User, self).__init__()
@classmethod
def get_permissions(cls, identifier, acl_class):
entity = Entity.load_by_id_or_name(identifier, acl_class)
return cls.permissions(entity['id'], entity['aclClass']), entity['owner']
@classmethod
def permissions(cls, id, acl_class):
api = cls.instance()
response_data = api.call('permissions?id={}&aclClass={}'.format(id, acl_class.upper()), None)
if 'payload' in response_data and 'permissions' in response_data['payload']:
permissions = []
for permission_json in response_data['payload']['permissions']:
permission_object = ObjectPermissionModel.load(permission_json)
permission_object.parse_mask(True)
permissions.append(permission_object)
return permissions
else:
return []
@classmethod
def grant_permission(cls, identifier, acl_class, user_name, principal, mask):
api = cls.instance()
payload = {}
if acl_class is not None:
payload['aclClass'] = acl_class.upper()
if identifier is not None:
payload['id'] = identifier
if mask is not None:
payload['mask'] = mask
if principal is not None:
payload['principal'] = principal
if user_name is not None:
payload['userName'] = user_name
data = json.dumps(payload)
api.call('grant', data)
@classmethod
def change_owner(cls, user_name, class_name, object_id):
api = cls.instance()
response_data = api.call('/grant/owner?userName={}&aclClass={}&id={}'.format(
user_name, str(class_name).upper(), object_id), None, http_method='POST')
if 'payload' in response_data and 'entity' in response_data['payload']:
return response_data['payload']['entity']
if 'message' in response_data:
raise RuntimeError(response_data['message'])
else:
raise RuntimeError("Failed to change owner.")
@classmethod
def generate_user_token(cls, user_name, duration):
api = cls.instance()
query = '/user/token?name=%s' % user_name
if duration:
query = '&expiration='.join([query, str(duration)])
response_data = api.call(query, None)
if 'payload' in response_data and 'token' in response_data['payload']:
return response_data['payload']['token']
if 'message' in response_data:
raise RuntimeError(response_data['message'])
else:
raise RuntimeError("Failed to generate user token.")
| true
| true
|
790c08692041c9b9e4ee1ccd4b7d5c190c472cac
| 2,159
|
py
|
Python
|
app/core/models.py
|
shadow-smoke/recipe-app-api
|
b5a75be5eb2a541accc39772a23d542fac1d5a69
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
shadow-smoke/recipe-app-api
|
b5a75be5eb2a541accc39772a23d542fac1d5a69
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
shadow-smoke/recipe-app-api
|
b5a75be5eb2a541accc39772a23d542fac1d5a69
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
# Create your models here.
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Create and saves a new user"""
if not email:
raise ValueError('Users must have email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=225, unique=True)
name = models.CharField(max_length=225)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
    USERNAME_FIELD = 'email'
class Tag(models.Model):
name = models.CharField(max_length=225)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Ingredient(models.Model):
name = models.CharField(max_length=225)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
def __str__(self):
return self.title
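# Illustrative shell usage (not part of the module; values are placeholders):
#   user = User.objects.create_user(email='test@example.com', password='pass123')
#   recipe = Recipe.objects.create(user=user, title='Steak and mushroom sauce',
#                                  time_minutes=5, price=5.00)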
| 28.786667
| 78
| 0.677165
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('Users must have email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=225, unique=True)
name = models.CharField(max_length=225)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
    USERNAME_FIELD = 'email'
class Tag(models.Model):
name = models.CharField(max_length=225)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Ingredient(models.Model):
name = models.CharField(max_length=225)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
def __str__(self):
return self.title
| true
| true
|
790c096958b7b2279aefcd8bf59cc4a8ff135be6
| 283
|
py
|
Python
|
serverless/flask-server/config.py
|
adrianaarcia/YPool
|
054b54495c7daedc8eb88b39d33d5b365a1e63ca
|
[
"MIT"
] | null | null | null |
serverless/flask-server/config.py
|
adrianaarcia/YPool
|
054b54495c7daedc8eb88b39d33d5b365a1e63ca
|
[
"MIT"
] | null | null | null |
serverless/flask-server/config.py
|
adrianaarcia/YPool
|
054b54495c7daedc8eb88b39d33d5b365a1e63ca
|
[
"MIT"
] | null | null | null |
import os
class Config:
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
ACL = 'public-read'
FLASKS3_BUCKET_NAME = os.environ.get('FLASKS3_BUCKET_NAME')
FLASKS3_REGION = os.environ.get('FLASKS3_REGION')
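# Minimal wiring sketch (assumes Flask and the Flask-S3 extension are
# installed; this block is illustrative and not part of the original file):
#   from flask import Flask
#   import flask_s3
#   app = Flask(__name__)
#   app.config.from_object(Config)
#   flask_s3.FlaskS3(app)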
| 47.166667
| 67
| 0.756184
|
import os
class Config:
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
ACL = 'public-read'
FLASKS3_BUCKET_NAME = os.environ.get('FLASKS3_BUCKET_NAME')
FLASKS3_REGION = os.environ.get('FLASKS3_REGION')
| true
| true
|
790c09a67cd70b4ba95bd72c8d02670852786701
| 10
|
py
|
Python
|
example_snippets/multimenus_snippets/Snippets/Python/Regular expressions/Compilation flags/Enable verbose REs, for cleaner and more organized code.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/Python/Regular expressions/Compilation flags/Enable verbose REs, for cleaner and more organized code.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/Python/Regular expressions/Compilation flags/Enable verbose REs, for cleaner and more organized code.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
re.VERBOSE
| 10
| 10
| 0.9
|
re.VERBOSE
| true
| true
|
790c0a12c970bdb08194aad4038039ba6fce2039
| 8,304
|
py
|
Python
|
rasa_core/interpreter.py
|
chumakovvchuma/rasa_core
|
fd4d310220ec76338ca90ca8c298ddbaa576849f
|
[
"Apache-2.0"
] | 1
|
2019-06-08T00:26:56.000Z
|
2019-06-08T00:26:56.000Z
|
rasa_core/interpreter.py
|
chumakovvchuma/rasa_core
|
fd4d310220ec76338ca90ca8c298ddbaa576849f
|
[
"Apache-2.0"
] | null | null | null |
rasa_core/interpreter.py
|
chumakovvchuma/rasa_core
|
fd4d310220ec76338ca90ca8c298ddbaa576849f
|
[
"Apache-2.0"
] | 2
|
2018-08-02T11:28:10.000Z
|
2020-07-11T23:03:16.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import re
import os
import requests
from builtins import str
from typing import Text, List, Dict, Any
logger = logging.getLogger(__name__)
INTENT_MESSAGE_PREFIX = "/"
class NaturalLanguageInterpreter(object):
def parse(self, text):
raise NotImplementedError(
"Interpreter needs to be able to parse "
"messages into structured output.")
@staticmethod
def create(obj):
if isinstance(obj, NaturalLanguageInterpreter):
return obj
if isinstance(obj, str):
return RasaNLUInterpreter(model_directory=obj)
else:
return RegexInterpreter() # default interpreter
class RegexInterpreter(NaturalLanguageInterpreter):
@staticmethod
def allowed_prefixes():
return INTENT_MESSAGE_PREFIX + "_" # _ is deprecated but supported
@staticmethod
def _create_entities(parsed_entities, sidx, eidx):
entities = []
for k, vs in parsed_entities.items():
if not isinstance(vs, list):
vs = [vs]
for value in vs:
entities.append({
"entity": k,
"start": sidx,
"end": eidx, # can't be more specific
"value": value
})
return entities
@staticmethod
    def _parse_parameters(entity_str, sidx, eidx, user_input):
# type: (Text, int, int, Text) -> List[Dict[Text, Any]]
        if entity_str is None or not entity_str.strip():
# if there is nothing to parse we will directly exit
return []
try:
            parsed_entities = json.loads(entity_str)
if isinstance(parsed_entities, dict):
return RegexInterpreter._create_entities(parsed_entities,
sidx, eidx)
else:
raise Exception("Parsed value isn't a json object "
"(instead parser found '{}')"
".".format(type(parsed_entities)))
except Exception as e:
logger.warning("Invalid to parse arguments in line "
"'{}'. Failed to decode parameters"
"as a json object. Make sure the intent"
"followed by a proper json object. "
"Error: {}".format(user_input, e))
return []
@staticmethod
def extract_intent_and_entities(user_input):
# type: (Text) -> object
"""Parse the user input using regexes to extract intent & entities."""
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
# the regex matches "slot{"a": 1}"
m = re.search('^['+prefixes+']?([^{]+)([{].+)?', user_input)
if m is not None:
event_name = m.group(1).strip()
entities = RegexInterpreter._parse_parameters(m.group(2),
m.start(2),
m.end(2),
user_input)
return event_name, entities
else:
logger.warning("Failed to parse intent end entities from "
"'{}'. ".format(user_input))
return None, []
@staticmethod
def deprecated_extraction(user_input):
"""DEPRECATED parse of user input message."""
        value_assign_rx = r'\s*(.+)\s*=\s*(.+)\s*'
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
        structured_message_rx = '^[' + prefixes + r']?([^\[]+)(\[(.+)\])?'
m = re.search(structured_message_rx, user_input)
if m is not None:
intent = m.group(1).lower()
offset = m.start(3)
entities_str = m.group(3)
entities = []
if entities_str is not None:
for entity_str in entities_str.split(','):
for match in re.finditer(value_assign_rx, entity_str):
start = match.start(2) + offset
end = match.end(0) + offset
entity = {
"entity": match.group(1),
"start": start,
"end": end,
"value": match.group(2)}
entities.append(entity)
return intent, entities
else:
return None, []
@staticmethod
def is_using_deprecated_format(text):
"""Indicates if the text string is using the deprecated intent format.
        In the deprecated format entities were annotated using `[name=Rasa]`
which has been replaced with `{"name": "Rasa"}`."""
return (text.find("[") != -1
and (text.find("{") == -1 or
text.find("[") < text.find("{")))
def parse(self, text):
"""Parse a text message."""
if self.is_using_deprecated_format(text):
intent, entities = self.deprecated_extraction(text)
else:
intent, entities = self.extract_intent_and_entities(text)
return {
'text': text,
'intent': {
'name': intent,
'confidence': 1.0,
},
'intent_ranking': [{
'name': intent,
'confidence': 1.0,
}],
'entities': entities,
}
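# Example of the structured-message format this interpreter accepts
# (derived from the regex above; shown for illustration):
#   RegexInterpreter().parse('/greet{"name": "Rasa"}')
#   # -> intent 'greet', entities [{"entity": "name", "value": "Rasa", ...}]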
class RasaNLUHttpInterpreter(NaturalLanguageInterpreter):
def __init__(self, model_name=None, token=None, server='http://localhost:5000', project_name='default'):
self.model_name = model_name
self.token = token
self.server = server
self.project_name = project_name
def parse(self, text):
"""Parse a text message.
Return a default value if the parsing of the text failed."""
default_return = {"intent": {"name": "", "confidence": 0.0},
"entities": [], "text": ""}
result = self._rasa_http_parse(text)
return result if result is not None else default_return
def _rasa_http_parse(self, text):
"""Send a text message to a running rasa NLU http server.
Return `None` on failure."""
if not self.server:
logger.error(
"Failed to parse text '{}' using rasa NLU over http. "
"No rasa NLU server specified!".format(text))
return None
params = {
"token": self.token,
"model": self.model_name,
"project": self.project_name,
"q": text
}
url = "{}/parse".format(self.server)
try:
result = requests.get(url, params=params)
if result.status_code == 200:
return result.json()
else:
logger.error(
"Failed to parse text '{}' using rasa NLU over http. "
"Error: {}".format(text, result.text))
return None
except Exception as e:
logger.error(
"Failed to parse text '{}' using rasa NLU over http. "
"Error: {}".format(text, e))
return None
class RasaNLUInterpreter(NaturalLanguageInterpreter):
def __init__(self, model_directory, config_file=None, lazy_init=False):
self.model_directory = model_directory
self.lazy_init = lazy_init
self.config_file = config_file
if not lazy_init:
self._load_interpreter()
else:
self.interpreter = None
def parse(self, text):
"""Parse a text message.
Return a default value if the parsing of the text failed."""
if self.lazy_init and self.interpreter is None:
self._load_interpreter()
return self.interpreter.parse(text)
def _load_interpreter(self):
from rasa_nlu.model import Interpreter
self.interpreter = Interpreter.load(self.model_directory)
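# Usage sketch (illustrative; the model path is a placeholder):
#   interpreter = RasaNLUInterpreter('models/nlu/default/current')
#   result = interpreter.parse('hello there')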
| 34.890756
| 108
| 0.53408
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import re
import os
import requests
from builtins import str
from typing import Text, List, Dict, Any
logger = logging.getLogger(__name__)
INTENT_MESSAGE_PREFIX = "/"
class NaturalLanguageInterpreter(object):
def parse(self, text):
raise NotImplementedError(
"Interpreter needs to be able to parse "
"messages into structured output.")
@staticmethod
def create(obj):
if isinstance(obj, NaturalLanguageInterpreter):
return obj
if isinstance(obj, str):
return RasaNLUInterpreter(model_directory=obj)
else:
return RegexInterpreter()
class RegexInterpreter(NaturalLanguageInterpreter):
@staticmethod
def allowed_prefixes():
return INTENT_MESSAGE_PREFIX + "_"
@staticmethod
def _create_entities(parsed_entities, sidx, eidx):
entities = []
for k, vs in parsed_entities.items():
if not isinstance(vs, list):
vs = [vs]
for value in vs:
entities.append({
"entity": k,
"start": sidx,
"end": eidx,
"value": value
})
return entities
@staticmethod
    def _parse_parameters(entity_str, sidx, eidx, user_input):
# type: (Text, int, int, Text) -> List[Dict[Text, Any]]
        if entity_str is None or not entity_str.strip():
# if there is nothing to parse we will directly exit
return []
try:
            parsed_entities = json.loads(entity_str)
if isinstance(parsed_entities, dict):
return RegexInterpreter._create_entities(parsed_entities,
sidx, eidx)
else:
raise Exception("Parsed value isn't a json object "
"(instead parser found '{}')"
".".format(type(parsed_entities)))
except Exception as e:
logger.warning("Invalid to parse arguments in line "
"'{}'. Failed to decode parameters"
"as a json object. Make sure the intent"
"followed by a proper json object. "
"Error: {}".format(user_input, e))
return []
@staticmethod
def extract_intent_and_entities(user_input):
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
m = re.search('^['+prefixes+']?([^{]+)([{].+)?', user_input)
if m is not None:
event_name = m.group(1).strip()
entities = RegexInterpreter._parse_parameters(m.group(2),
m.start(2),
m.end(2),
user_input)
return event_name, entities
else:
logger.warning("Failed to parse intent end entities from "
"'{}'. ".format(user_input))
return None, []
@staticmethod
def deprecated_extraction(user_input):
        value_assign_rx = r'\s*(.+)\s*=\s*(.+)\s*'
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
        structured_message_rx = '^[' + prefixes + r']?([^\[]+)(\[(.+)\])?'
m = re.search(structured_message_rx, user_input)
if m is not None:
intent = m.group(1).lower()
offset = m.start(3)
entities_str = m.group(3)
entities = []
if entities_str is not None:
for entity_str in entities_str.split(','):
for match in re.finditer(value_assign_rx, entity_str):
start = match.start(2) + offset
end = match.end(0) + offset
entity = {
"entity": match.group(1),
"start": start,
"end": end,
"value": match.group(2)}
entities.append(entity)
return intent, entities
else:
return None, []
@staticmethod
def is_using_deprecated_format(text):
return (text.find("[") != -1
and (text.find("{") == -1 or
text.find("[") < text.find("{")))
def parse(self, text):
if self.is_using_deprecated_format(text):
intent, entities = self.deprecated_extraction(text)
else:
intent, entities = self.extract_intent_and_entities(text)
return {
'text': text,
'intent': {
'name': intent,
'confidence': 1.0,
},
'intent_ranking': [{
'name': intent,
'confidence': 1.0,
}],
'entities': entities,
}
class RasaNLUHttpInterpreter(NaturalLanguageInterpreter):
def __init__(self, model_name=None, token=None, server='http://localhost:5000', project_name='default'):
self.model_name = model_name
self.token = token
self.server = server
self.project_name = project_name
def parse(self, text):
default_return = {"intent": {"name": "", "confidence": 0.0},
"entities": [], "text": ""}
result = self._rasa_http_parse(text)
return result if result is not None else default_return
def _rasa_http_parse(self, text):
if not self.server:
logger.error(
"Failed to parse text '{}' using rasa NLU over http. "
"No rasa NLU server specified!".format(text))
return None
params = {
"token": self.token,
"model": self.model_name,
"project": self.project_name,
"q": text
}
url = "{}/parse".format(self.server)
try:
result = requests.get(url, params=params)
if result.status_code == 200:
return result.json()
else:
logger.error(
"Failed to parse text '{}' using rasa NLU over http. "
"Error: {}".format(text, result.text))
return None
except Exception as e:
logger.error(
"Failed to parse text '{}' using rasa NLU over http. "
"Error: {}".format(text, e))
return None
class RasaNLUInterpreter(NaturalLanguageInterpreter):
def __init__(self, model_directory, config_file=None, lazy_init=False):
self.model_directory = model_directory
self.lazy_init = lazy_init
self.config_file = config_file
if not lazy_init:
self._load_interpreter()
else:
self.interpreter = None
def parse(self, text):
if self.lazy_init and self.interpreter is None:
self._load_interpreter()
return self.interpreter.parse(text)
def _load_interpreter(self):
from rasa_nlu.model import Interpreter
self.interpreter = Interpreter.load(self.model_directory)
# --- File: rally/common/objects/__init__.py (repo: varuntiwari27/rally, license: Apache-2.0) ---
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contains the Rally objects."""
from rally.common.objects.credential import Credential # noqa
from rally.common.objects.deploy import Deployment # noqa
from rally.common.objects.endpoint import Endpoint # noqa
from rally.common.objects.task import Task # noqa
from rally.common.objects.verification import Verification # noqa
# --- File: cohesity_management_sdk/models/protection_info.py (repo: nick6655/management-sdk-python, license: Apache-2.0) ---
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class ProtectionInfo(object):
"""Implementation of the 'ProtectionInfo' model.
dataLocation defines data location related information.
Attributes:
end_time_usecs (long|int): Specifies the end time for object
retention.
location (string): Specifies the location of the object.
policy_id (string): Specifies the id of the policy.
protection_job_id (long|int): Specifies the id of the protection job.
protection_job_name (string): Specifies the protection job name which
protects this object.
retention_period (long|int): Specifies the retention period.
start_time_usecs (long|int): Specifies the start time for object
retention.
storage_domain (string): Specifies the storage domain name.
total_snapshots (long|int): Specifies the total number of snapshots.
"""
# Create a mapping from Model property names to API property names
_names = {
"end_time_usecs":'endTimeUsecs',
"location":'location',
"policy_id":'policyId',
"protection_job_id":'protectionJobId',
"protection_job_name":'protectionJobName',
"retention_period":'retentionPeriod',
"start_time_usecs":'startTimeUsecs',
"storage_domain":'storageDomain',
"total_snapshots":'totalSnapshots'
}
def __init__(self,
end_time_usecs=None,
location=None,
policy_id=None,
protection_job_id=None,
protection_job_name=None,
retention_period=None,
start_time_usecs=None,
storage_domain=None,
total_snapshots=None):
"""Constructor for the ProtectionInfo class"""
# Initialize members of the class
self.end_time_usecs = end_time_usecs
self.location = location
self.policy_id = policy_id
self.protection_job_id = protection_job_id
self.protection_job_name = protection_job_name
self.retention_period = retention_period
self.start_time_usecs = start_time_usecs
self.storage_domain = storage_domain
self.total_snapshots = total_snapshots
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
end_time_usecs = dictionary.get('endTimeUsecs')
location = dictionary.get('location')
policy_id = dictionary.get('policyId')
protection_job_id = dictionary.get('protectionJobId')
protection_job_name = dictionary.get('protectionJobName')
retention_period = dictionary.get('retentionPeriod')
start_time_usecs = dictionary.get('startTimeUsecs')
storage_domain = dictionary.get('storageDomain')
total_snapshots = dictionary.get('totalSnapshots')
# Return an object of this model
return cls(end_time_usecs,
location,
policy_id,
protection_job_id,
protection_job_name,
retention_period,
start_time_usecs,
storage_domain,
total_snapshots)
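# Round-trip sketch (field values are illustrative): from_dictionary() maps
# the camelCase API keys listed in _names back onto snake_case attributes.
#
#   info = ProtectionInfo.from_dictionary({'policyId': 'policy-1',
#                                          'totalSnapshots': 4})
#   info.policy_id        # -> 'policy-1'
#   info.total_snapshots  # -> 4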
# --- File: mypy/semanal.py (repo: timwraight/mypy, license: PSF-2.0) ---
"""The new semantic analyzer (work in progress).
Bind names to definitions and do various other simple consistency
checks. It also detects special forms such as NamedTuple and cast().
Multiple analysis iterations may be needed to analyze forward
references and import cycles. Each iteration "fills in" additional
bindings and references until everything has been bound.
For example, consider this program:
x = 1
y = x
Here semantic analysis would detect that the assignment 'x = 1'
defines a new variable, the type of which is to be inferred (in a
later pass; type inference or type checking is not part of semantic
analysis). Also, it would bind both references to 'x' to the same
module-level variable (Var) node. The second assignment would also
be analyzed, and the type of 'y' marked as being inferred.
Semantic analysis of types is implemented in typeanal.py.
See semanal_main.py for the top-level logic.
Some important properties:
* After semantic analysis is complete, no PlaceholderNode and
PlaceholderType instances should remain. During semantic analysis,
if we encounter one of these, the current target should be deferred.
* A TypeInfo is only created once we know certain basic information about
a type, such as the MRO, existence of a Tuple base class (e.g., for named
tuples), and whether we have a TypedDict. We use a temporary
PlaceholderNode node in the symbol table if some such information is
missing.
* For assignments, we only add a non-placeholder symbol table entry once
we know the sort of thing being defined (variable, NamedTuple, type alias,
etc.).
* Every part of the analysis step must support multiple iterations over
the same AST nodes, and each iteration must be able to fill in arbitrary
things that were missing or incomplete in previous iterations.
* Changes performed by the analysis need to be reversible, since mypy
daemon strips and reuses existing ASTs (to improve performance and/or
reduce memory use).
"""
from contextlib import contextmanager
from typing import (
List, Dict, Set, Tuple, cast, TypeVar, Union, Optional, Callable, Iterator, Iterable,
)
from typing_extensions import Final
from mypy.nodes import (
MypyFile, TypeInfo, Node, AssignmentStmt, FuncDef, OverloadedFuncDef,
ClassDef, Var, GDEF, FuncItem, Import, Expression, Lvalue,
ImportFrom, ImportAll, Block, LDEF, NameExpr, MemberExpr,
IndexExpr, TupleExpr, ListExpr, ExpressionStmt, ReturnStmt,
RaiseStmt, AssertStmt, OperatorAssignmentStmt, WhileStmt,
ForStmt, BreakStmt, ContinueStmt, IfStmt, TryStmt, WithStmt, DelStmt,
GlobalDecl, SuperExpr, DictExpr, CallExpr, RefExpr, OpExpr, UnaryExpr,
SliceExpr, CastExpr, RevealExpr, TypeApplication, Context, SymbolTable,
SymbolTableNode, ListComprehension, GeneratorExpr,
LambdaExpr, MDEF, Decorator, SetExpr, TypeVarExpr,
StrExpr, BytesExpr, PrintStmt, ConditionalExpr, PromoteExpr,
ComparisonExpr, StarExpr, ARG_POS, ARG_NAMED, type_aliases,
YieldFromExpr, NamedTupleExpr, NonlocalDecl, SymbolNode,
SetComprehension, DictionaryComprehension, TypeAlias, TypeAliasExpr,
YieldExpr, ExecStmt, BackquoteExpr, ImportBase, AwaitExpr,
IntExpr, FloatExpr, UnicodeExpr, TempNode, OverloadPart,
PlaceholderNode, COVARIANT, CONTRAVARIANT, INVARIANT,
nongen_builtins, get_member_expr_fullname, REVEAL_TYPE,
REVEAL_LOCALS, is_final_node, TypedDictExpr, type_aliases_target_versions,
EnumCallExpr, RUNTIME_PROTOCOL_DECOS, FakeExpression, Statement, AssignmentExpr,
)
from mypy.tvar_scope import TypeVarScope
from mypy.typevars import fill_typevars
from mypy.visitor import NodeVisitor
from mypy.errors import Errors, report_internal_error
from mypy.messages import best_matches, MessageBuilder, pretty_or
from mypy.errorcodes import ErrorCode
from mypy import message_registry, errorcodes as codes
from mypy.types import (
FunctionLike, UnboundType, TypeVarDef, TupleType, UnionType, StarType,
CallableType, Overloaded, Instance, Type, AnyType, LiteralType, LiteralValue,
TypeTranslator, TypeOfAny, TypeType, NoneType, PlaceholderType, TPDICT_NAMES, ProperType,
get_proper_type, get_proper_types
)
from mypy.typeops import function_type
from mypy.type_visitor import TypeQuery
from mypy.nodes import implicit_module_attrs
from mypy.typeanal import (
TypeAnalyser, analyze_type_alias, no_subscript_builtin_alias,
TypeVariableQuery, TypeVarList, remove_dups, has_any_from_unimported_type,
check_for_explicit_any, type_constructors, fix_instance_types
)
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.options import Options
from mypy.plugin import (
Plugin, ClassDefContext, SemanticAnalyzerPluginInterface,
DynamicClassDefContext
)
from mypy.util import correct_relative_import, unmangle, module_prefix
from mypy.scope import Scope
from mypy.semanal_shared import (
SemanticAnalyzerInterface, set_callable_name, calculate_tuple_fallback, PRIORITY_FALLBACKS
)
from mypy.semanal_namedtuple import NamedTupleAnalyzer
from mypy.semanal_typeddict import TypedDictAnalyzer
from mypy.semanal_enum import EnumCallAnalyzer
from mypy.semanal_newtype import NewTypeAnalyzer
from mypy.reachability import (
infer_reachability_of_if_statement, infer_condition_value, ALWAYS_FALSE, ALWAYS_TRUE,
MYPY_TRUE, MYPY_FALSE
)
from mypy.mro import calculate_mro, MroError
T = TypeVar('T')
# Map from the full name of a missing definition to the test fixture (under
# test-data/unit/fixtures/) that provides the definition. This is used for
# generating better error messages when running mypy tests only.
SUGGESTED_TEST_FIXTURES = {
'builtins.list': 'list.pyi',
'builtins.dict': 'dict.pyi',
'builtins.set': 'set.pyi',
'builtins.bool': 'bool.pyi',
'builtins.Exception': 'exception.pyi',
'builtins.BaseException': 'exception.pyi',
'builtins.isinstance': 'isinstancelist.pyi',
'builtins.property': 'property.pyi',
'builtins.classmethod': 'classmethod.pyi',
} # type: Final
TYPES_FOR_UNIMPORTED_HINTS = {
'typing.Any',
'typing.Callable',
'typing.Dict',
'typing.Iterable',
'typing.Iterator',
'typing.List',
'typing.Optional',
'typing.Set',
'typing.Tuple',
'typing.TypeVar',
'typing.Union',
'typing.cast',
} # type: Final
# Special cased built-in classes that are needed for basic functionality and need to be
# available very early on.
CORE_BUILTIN_CLASSES = ['object', 'bool', 'function'] # type: Final
# Used for tracking incomplete references
Tag = int
class SemanticAnalyzer(NodeVisitor[None],
SemanticAnalyzerInterface,
SemanticAnalyzerPluginInterface):
"""Semantically analyze parsed mypy files.
The analyzer binds names and does various consistency checks for an
AST. Note that type checking is performed as a separate pass.
"""
# Module name space
modules = None # type: Dict[str, MypyFile]
# Global name space for current module
globals = None # type: SymbolTable
# Names declared using "global" (separate set for each scope)
global_decls = None # type: List[Set[str]]
# Names declated using "nonlocal" (separate set for each scope)
nonlocal_decls = None # type: List[Set[str]]
# Local names of function scopes; None for non-function scopes.
locals = None # type: List[Optional[SymbolTable]]
# Whether each scope is a comprehension scope.
is_comprehension_stack = None # type: List[bool]
# Nested block depths of scopes
block_depth = None # type: List[int]
# TypeInfo of directly enclosing class (or None)
type = None # type: Optional[TypeInfo]
# Stack of outer classes (the second tuple item contains tvars).
type_stack = None # type: List[Optional[TypeInfo]]
# Type variables bound by the current scope, be it class or function
tvar_scope = None # type: TypeVarScope
# Per-module options
options = None # type: Options
# Stack of functions being analyzed
function_stack = None # type: List[FuncItem]
# Set to True if semantic analysis defines a name, or replaces a
# placeholder definition. If some iteration makes no progress,
# there can be at most one additional final iteration (see below).
progress = False
deferred = False # Set to true if another analysis pass is needed
incomplete = False # Set to true if current module namespace is missing things
# Is this the final iteration of semantic analysis (where we report
# unbound names due to cyclic definitions and should not defer)?
_final_iteration = False
# These names couldn't be added to the symbol table due to incomplete deps.
# Note that missing names are per module, _not_ per namespace. This means that e.g.
# a missing name at global scope will block adding same name at a class scope.
# This should not affect correctness and is purely a performance issue,
# since it can cause unnecessary deferrals. These are represented as
# PlaceholderNodes in the symbol table. We use this to ensure that the first
# definition takes precedence even if it's incomplete.
#
# Note that a star import adds a special name '*' to the set, this blocks
# adding _any_ names in the current file.
missing_names = None # type: Set[str]
# Callbacks that will be called after semantic analysis to tweak things.
patches = None # type: List[Tuple[int, Callable[[], None]]]
loop_depth = 0 # Depth of breakable loops
cur_mod_id = '' # Current module id (or None) (phase 2)
is_stub_file = False # Are we analyzing a stub file?
_is_typeshed_stub_file = False # Are we analyzing a typeshed stub file?
imports = None # type: Set[str] # Imported modules (during phase 2 analysis)
# Note: some imports (and therefore dependencies) might
# not be found in phase 1, for example due to * imports.
errors = None # type: Errors # Keeps track of generated errors
plugin = None # type: Plugin # Mypy plugin for special casing of library features
statement = None # type: Optional[Statement] # Statement/definition being analyzed
def __init__(self,
modules: Dict[str, MypyFile],
missing_modules: Set[str],
incomplete_namespaces: Set[str],
errors: Errors,
plugin: Plugin) -> None:
"""Construct semantic analyzer.
We reuse the same semantic analyzer instance across multiple modules.
Args:
modules: Global modules dictionary
missing_modules: Modules that could not be imported encountered so far
incomplete_namespaces: Namespaces that are being populated during semantic analysis
(can contain modules and classes within the current SCC; mutated by the caller)
errors: Report analysis errors using this instance
"""
self.locals = [None]
self.is_comprehension_stack = [False]
# Saved namespaces from previous iteration. Every top-level function/method body is
# analyzed in several iterations until all names are resolved. We need to save
# the local namespaces for the top level function and all nested functions between
# these iterations. See also semanal_main.process_top_level_function().
self.saved_locals = {} \
# type: Dict[Union[FuncItem, GeneratorExpr, DictionaryComprehension], SymbolTable]
self.imports = set()
self.type = None
self.type_stack = []
self.tvar_scope = TypeVarScope()
self.function_stack = []
self.block_depth = [0]
self.loop_depth = 0
self.errors = errors
self.modules = modules
self.msg = MessageBuilder(errors, modules)
self.missing_modules = missing_modules
# These namespaces are still in process of being populated. If we encounter a
# missing name in these namespaces, we need to defer the current analysis target,
# since it's possible that the name will be there once the namespace is complete.
self.incomplete_namespaces = incomplete_namespaces
self.all_exports = [] # type: List[str]
# Map from module id to list of explicitly exported names (i.e. names in __all__).
self.export_map = {} # type: Dict[str, List[str]]
self.plugin = plugin
# If True, process function definitions. If False, don't. This is used
# for processing module top levels in fine-grained incremental mode.
self.recurse_into_functions = True
self.scope = Scope()
# Trace line numbers for every file where deferral happened during analysis of
# current SCC or top-level function.
self.deferral_debug_context = [] # type: List[Tuple[str, int]]
# mypyc doesn't properly handle implementing an abstractproperty
# with a regular attribute so we make them properties
@property
def is_typeshed_stub_file(self) -> bool:
return self._is_typeshed_stub_file
@property
def final_iteration(self) -> bool:
return self._final_iteration
#
# Preparing module (performed before semantic analysis)
#
def prepare_file(self, file_node: MypyFile) -> None:
"""Prepare a freshly parsed file for semantic analysis."""
if 'builtins' in self.modules:
file_node.names['__builtins__'] = SymbolTableNode(GDEF,
self.modules['builtins'])
if file_node.fullname() == 'builtins':
self.prepare_builtins_namespace(file_node)
if file_node.fullname() == 'typing':
self.prepare_typing_namespace(file_node)
def prepare_typing_namespace(self, file_node: MypyFile) -> None:
"""Remove dummy alias definitions such as List = TypeAlias(object) from typing.
They will be replaced with real aliases when corresponding targets are ready.
"""
for stmt in file_node.defs.copy():
if (isinstance(stmt, AssignmentStmt) and len(stmt.lvalues) == 1 and
isinstance(stmt.lvalues[0], NameExpr)):
                # Assignment to a simple name; remove it if it is a dummy alias.
if 'typing.' + stmt.lvalues[0].name in type_aliases:
file_node.defs.remove(stmt)
def prepare_builtins_namespace(self, file_node: MypyFile) -> None:
"""Add certain special-cased definitions to the builtins module.
Some definitions are too special or fundamental to be processed
normally from the AST.
"""
names = file_node.names
# Add empty definition for core built-in classes, since they are required for basic
# operation. These will be completed later on.
for name in CORE_BUILTIN_CLASSES:
cdef = ClassDef(name, Block([])) # Dummy ClassDef, will be replaced later
info = TypeInfo(SymbolTable(), cdef, 'builtins')
info._fullname = 'builtins.%s' % name
names[name] = SymbolTableNode(GDEF, info)
bool_info = names['bool'].node
assert isinstance(bool_info, TypeInfo)
bool_type = Instance(bool_info, [])
special_var_types = [
('None', NoneType()),
# reveal_type is a mypy-only function that gives an error with
# the type of its arg.
('reveal_type', AnyType(TypeOfAny.special_form)),
# reveal_locals is a mypy-only function that gives an error with the types of
# locals
('reveal_locals', AnyType(TypeOfAny.special_form)),
('True', bool_type),
('False', bool_type),
('__debug__', bool_type),
] # type: List[Tuple[str, Type]]
for name, typ in special_var_types:
v = Var(name, typ)
v._fullname = 'builtins.%s' % name
file_node.names[name] = SymbolTableNode(GDEF, v)
#
# Analyzing a target
#
def refresh_partial(self,
node: Union[MypyFile, FuncDef, OverloadedFuncDef],
patches: List[Tuple[int, Callable[[], None]]],
final_iteration: bool,
file_node: MypyFile,
options: Options,
active_type: Optional[TypeInfo] = None) -> None:
"""Refresh a stale target in fine-grained incremental mode."""
self.patches = patches
self.deferred = False
self.incomplete = False
self._final_iteration = final_iteration
self.missing_names = set()
with self.file_context(file_node, options, active_type):
if isinstance(node, MypyFile):
self.refresh_top_level(node)
else:
self.recurse_into_functions = True
self.accept(node)
del self.patches
def refresh_top_level(self, file_node: MypyFile) -> None:
"""Reanalyze a stale module top-level in fine-grained incremental mode."""
self.recurse_into_functions = False
self.add_implicit_module_attrs(file_node)
for d in file_node.defs:
self.accept(d)
if file_node.fullname() == 'typing':
self.add_builtin_aliases(file_node)
self.adjust_public_exports()
self.export_map[self.cur_mod_id] = self.all_exports
self.all_exports = []
def add_implicit_module_attrs(self, file_node: MypyFile) -> None:
"""Manually add implicit definitions of module '__name__' etc."""
for name, t in implicit_module_attrs.items():
# unicode docstrings should be accepted in Python 2
if name == '__doc__':
if self.options.python_version >= (3, 0):
typ = UnboundType('__builtins__.str') # type: Type
else:
typ = UnionType([UnboundType('__builtins__.str'),
UnboundType('__builtins__.unicode')])
else:
assert t is not None, 'type should be specified for {}'.format(name)
typ = UnboundType(t)
existing = file_node.names.get(name)
if existing is not None and not isinstance(existing.node, PlaceholderNode):
# Already exists.
continue
an_type = self.anal_type(typ)
if an_type:
var = Var(name, an_type)
var._fullname = self.qualified_name(name)
var.is_ready = True
self.add_symbol(name, var, dummy_context())
else:
self.add_symbol(name,
PlaceholderNode(self.qualified_name(name), file_node, -1),
dummy_context())
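    # For example, '__name__' is given type 'str' here, while '__doc__' is
    # special-cased above so that Python 2 code may treat docstrings as either
    # 'str' or 'unicode'.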
def add_builtin_aliases(self, tree: MypyFile) -> None:
"""Add builtin type aliases to typing module.
For historical reasons, the aliases like `List = list` are not defined
in typeshed stubs for typing module. Instead we need to manually add the
corresponding nodes on the fly. We explicitly mark these aliases as normalized,
so that a user can write `typing.List[int]`.
"""
assert tree.fullname() == 'typing'
for alias, target_name in type_aliases.items():
if type_aliases_target_versions[alias] > self.options.python_version:
# This alias is not available on this Python version.
continue
name = alias.split('.')[-1]
if name in tree.names and not isinstance(tree.names[name].node, PlaceholderNode):
continue
tag = self.track_incomplete_refs()
n = self.lookup_fully_qualified_or_none(target_name)
if n:
if isinstance(n.node, PlaceholderNode):
self.mark_incomplete(name, tree)
else:
# Found built-in class target. Create alias.
target = self.named_type_or_none(target_name, [])
assert target is not None
# Transform List to List[Any], etc.
fix_instance_types(target, self.fail)
alias_node = TypeAlias(target, alias,
line=-1, column=-1, # there is no context
no_args=True, normalized=True)
self.add_symbol(name, alias_node, tree)
elif self.found_incomplete_ref(tag):
# Built-in class target may not ready yet -- defer.
self.mark_incomplete(name, tree)
else:
# Test fixtures may be missing some builtin classes, which is okay.
# Kill the placeholder if there is one.
if name in tree.names:
assert isinstance(tree.names[name].node, PlaceholderNode)
del tree.names[name]
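    # Net effect (sketch): after this runs, 'typing.List' resolves to a
    # TypeAlias targeting the fixed-up 'builtins.list' instance type, so user
    # code such as 'x: typing.List[int]' analyzes like 'x: list[int]'.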
def adjust_public_exports(self) -> None:
"""Adjust the module visibility of globals due to __all__."""
if '__all__' in self.globals:
for name, g in self.globals.items():
# Being included in __all__ explicitly exports and makes public.
if name in self.all_exports:
g.module_public = True
g.module_hidden = False
# But when __all__ is defined, and a symbol is not included in it,
# it cannot be public.
else:
g.module_public = False
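    # E.g. with '__all__ = ["foo"]' in a module, only 'foo' remains public;
    # every other global becomes non-public, which affects star imports and
    # implicit re-exports.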
@contextmanager
def file_context(self,
file_node: MypyFile,
options: Options,
active_type: Optional[TypeInfo] = None) -> Iterator[None]:
"""Configure analyzer for analyzing targets within a file/class.
Args:
file_node: target file
options: options specific to the file
active_type: must be the surrounding class to analyze method targets
"""
scope = self.scope
self.options = options
self.errors.set_file(file_node.path, file_node.fullname(), scope=scope)
self.cur_mod_node = file_node
self.cur_mod_id = file_node.fullname()
scope.enter_file(self.cur_mod_id)
self.is_stub_file = file_node.path.lower().endswith('.pyi')
self._is_typeshed_stub_file = self.errors.is_typeshed_file(file_node.path)
self.globals = file_node.names
self.tvar_scope = TypeVarScope()
self.named_tuple_analyzer = NamedTupleAnalyzer(options, self)
self.typed_dict_analyzer = TypedDictAnalyzer(options, self, self.msg)
self.enum_call_analyzer = EnumCallAnalyzer(options, self)
self.newtype_analyzer = NewTypeAnalyzer(options, self, self.msg)
# Counter that keeps track of references to undefined things potentially caused by
# incomplete namespaces.
self.num_incomplete_refs = 0
if active_type:
scope.enter_class(active_type)
self.enter_class(active_type.defn.info)
for tvar in active_type.defn.type_vars:
self.tvar_scope.bind_existing(tvar)
yield
if active_type:
scope.leave()
self.leave_class()
self.type = None
scope.leave()
del self.options
#
# Functions
#
def visit_func_def(self, defn: FuncDef) -> None:
self.statement = defn
# Visit default values because they may contain assignment expressions.
for arg in defn.arguments:
if arg.initializer:
arg.initializer.accept(self)
defn.is_conditional = self.block_depth[-1] > 0
# Set full names even for those definitions that aren't added
# to a symbol table. For example, for overload items.
defn._fullname = self.qualified_name(defn.name())
# We don't add module top-level functions to symbol tables
# when we analyze their bodies in the second phase on analysis,
# since they were added in the first phase. Nested functions
# get always added, since they aren't separate targets.
if not self.recurse_into_functions or len(self.function_stack) > 0:
if not defn.is_decorated and not defn.is_overload:
self.add_function_to_symbol_table(defn)
if not self.recurse_into_functions:
return
with self.scope.function_scope(defn):
self.analyze_func_def(defn)
def analyze_func_def(self, defn: FuncDef) -> None:
self.function_stack.append(defn)
if defn.type:
assert isinstance(defn.type, CallableType)
self.update_function_type_variables(defn.type, defn)
self.function_stack.pop()
if self.is_class_scope():
# Method definition
assert self.type is not None
defn.info = self.type
if defn.type is not None and defn.name() in ('__init__', '__init_subclass__'):
assert isinstance(defn.type, CallableType)
if isinstance(get_proper_type(defn.type.ret_type), AnyType):
defn.type = defn.type.copy_modified(ret_type=NoneType())
self.prepare_method_signature(defn, self.type)
# Analyze function signature
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
if defn.type:
self.check_classvar_in_signature(defn.type)
assert isinstance(defn.type, CallableType)
# Signature must be analyzed in the surrounding scope so that
# class-level imported names and type variables are in scope.
analyzer = self.type_analyzer()
tag = self.track_incomplete_refs()
result = analyzer.visit_callable_type(defn.type, nested=False)
                # Don't store types that are not ready yet (including placeholders).
if self.found_incomplete_ref(tag) or has_placeholder(result):
self.defer(defn)
return
assert isinstance(result, ProperType)
defn.type = result
self.add_type_alias_deps(analyzer.aliases_used)
self.check_function_signature(defn)
if isinstance(defn, FuncDef):
assert isinstance(defn.type, CallableType)
defn.type = set_callable_name(defn.type, defn)
self.analyze_arg_initializers(defn)
self.analyze_function_body(defn)
if defn.is_coroutine and isinstance(defn.type, CallableType) and not self.deferred:
if defn.is_async_generator:
# Async generator types are handled elsewhere
pass
else:
# A coroutine defined as `async def foo(...) -> T: ...`
# has external return type `Coroutine[Any, Any, T]`.
any_type = AnyType(TypeOfAny.special_form)
ret_type = self.named_type_or_none('typing.Coroutine',
[any_type, any_type, defn.type.ret_type])
assert ret_type is not None, "Internal error: typing.Coroutine not found"
defn.type = defn.type.copy_modified(ret_type=ret_type)
def prepare_method_signature(self, func: FuncDef, info: TypeInfo) -> None:
"""Check basic signature validity and tweak annotation of self/cls argument."""
# Only non-static methods are special.
functype = func.type
if not func.is_static:
if func.name() == '__init_subclass__':
func.is_class = True
if not func.arguments:
self.fail('Method must have at least one argument', func)
elif isinstance(functype, CallableType):
self_type = get_proper_type(functype.arg_types[0])
if isinstance(self_type, AnyType):
leading_type = fill_typevars(info) # type: Type
if func.is_class or func.name() == '__new__':
leading_type = self.class_type(leading_type)
func.type = replace_implicit_first_type(functype, leading_type)
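    # E.g. in 'class C:' a method 'def f(self): ...' whose 'self' is implicitly
    # 'Any' gets its first argument type replaced by C here (or by Type[C] for
    # class methods and '__new__').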
def set_original_def(self, previous: Optional[Node], new: Union[FuncDef, Decorator]) -> bool:
"""If 'new' conditionally redefine 'previous', set 'previous' as original
We reject straight redefinitions of functions, as they are usually
a programming error. For example:
def f(): ...
def f(): ... # Error: 'f' redefined
"""
if isinstance(new, Decorator):
new = new.func
if isinstance(previous, (FuncDef, Var, Decorator)) and new.is_conditional:
new.original_def = previous
return True
else:
return False
def update_function_type_variables(self, fun_type: CallableType, defn: FuncItem) -> None:
"""Make any type variables in the signature of defn explicit.
Update the signature of defn to contain type variable definitions
if defn is generic.
"""
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
a = self.type_analyzer()
fun_type.variables = a.bind_function_type_variables(fun_type, defn)
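    # E.g. for 'def f(x: T) -> T: ...' with T a module-level TypeVar, this
    # binds T into fun_type.variables, making the signature generic in T.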
def visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
self.statement = defn
self.add_function_to_symbol_table(defn)
if not self.recurse_into_functions:
return
        # NB: Since analyze_overloaded_func_def will call accept on the
# underlying FuncDefs, the function might get entered twice.
# This is fine, though, because only the outermost function is
# used to compute targets.
with self.scope.function_scope(defn):
self.analyze_overloaded_func_def(defn)
def analyze_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
# OverloadedFuncDef refers to any legitimate situation where you have
# more than one declaration for the same function in a row. This occurs
# with a @property with a setter or a deleter, and for a classic
# @overload.
defn._fullname = self.qualified_name(defn.name())
# TODO: avoid modifying items.
defn.items = defn.unanalyzed_items.copy()
first_item = defn.items[0]
first_item.is_overload = True
first_item.accept(self)
if isinstance(first_item, Decorator) and first_item.func.is_property:
# This is a property.
first_item.func.is_overload = True
self.analyze_property_with_multi_part_definition(defn)
typ = function_type(first_item.func, self.builtin_type('builtins.function'))
assert isinstance(typ, CallableType)
types = [typ]
else:
            # This is a normal overload. Find the item signatures, the
# implementation (if outside a stub), and any missing @overload
# decorators.
types, impl, non_overload_indexes = self.analyze_overload_sigs_and_impl(defn)
defn.impl = impl
if non_overload_indexes:
self.handle_missing_overload_decorators(defn, non_overload_indexes,
some_overload_decorators=len(types) > 0)
# If we found an implementation, remove it from the overload item list,
# as it's special.
if impl is not None:
assert impl is defn.items[-1]
defn.items = defn.items[:-1]
elif not non_overload_indexes:
self.handle_missing_overload_implementation(defn)
if types:
defn.type = Overloaded(types)
defn.type.line = defn.line
if not defn.items:
# It was not a real overload after all, but function redefinition. We've
# visited the redefinition(s) already.
if not defn.impl:
# For really broken overloads with no items and no implementation we need to keep
# at least one item to hold basic information like function name.
defn.impl = defn.unanalyzed_items[-1]
return
# We know this is an overload def. Infer properties and perform some checks.
self.process_final_in_overload(defn)
self.process_static_or_class_method_in_overload(defn)
def analyze_overload_sigs_and_impl(
self,
defn: OverloadedFuncDef) -> Tuple[List[CallableType],
Optional[OverloadPart],
List[int]]:
"""Find overload signatures, the implementation, and items with missing @overload.
        Assume that the first item was already analyzed. As a side effect:
analyzes remaining items and updates 'is_overload' flags.
"""
types = []
non_overload_indexes = []
impl = None # type: Optional[OverloadPart]
for i, item in enumerate(defn.items):
if i != 0:
# Assume that the first item was already visited
item.is_overload = True
item.accept(self)
# TODO: support decorated overloaded functions properly
if isinstance(item, Decorator):
callable = function_type(item.func, self.builtin_type('builtins.function'))
assert isinstance(callable, CallableType)
if not any(refers_to_fullname(dec, 'typing.overload')
for dec in item.decorators):
if i == len(defn.items) - 1 and not self.is_stub_file:
# Last item outside a stub is impl
impl = item
else:
# Oops it wasn't an overload after all. A clear error
# will vary based on where in the list it is, record
# that.
non_overload_indexes.append(i)
else:
item.func.is_overload = True
types.append(callable)
elif isinstance(item, FuncDef):
if i == len(defn.items) - 1 and not self.is_stub_file:
impl = item
else:
non_overload_indexes.append(i)
return types, impl, non_overload_indexes
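    # Shape of the result for a typical overload outside a stub (sketch):
    #
    #   @overload
    #   def f(x: int) -> int: ...
    #   @overload
    #   def f(x: str) -> str: ...
    #   def f(x): return x
    #
    # -> types holds both decorated signatures, impl is the final FuncDef, and
    #    non_overload_indexes stays empty.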
def handle_missing_overload_decorators(self,
defn: OverloadedFuncDef,
non_overload_indexes: List[int],
some_overload_decorators: bool) -> None:
"""Generate errors for overload items without @overload.
        Side effect: remove non-overload items.
"""
if some_overload_decorators:
# Some of them were overloads, but not all.
for idx in non_overload_indexes:
if self.is_stub_file:
self.fail("An implementation for an overloaded function "
"is not allowed in a stub file", defn.items[idx])
else:
self.fail("The implementation for an overloaded function "
"must come last", defn.items[idx])
else:
for idx in non_overload_indexes[1:]:
self.name_already_defined(defn.name(), defn.items[idx], defn.items[0])
if defn.impl:
self.name_already_defined(defn.name(), defn.impl, defn.items[0])
# Remove the non-overloads
for idx in reversed(non_overload_indexes):
del defn.items[idx]
def handle_missing_overload_implementation(self, defn: OverloadedFuncDef) -> None:
"""Generate error about missing overload implementation (only if needed)."""
if not self.is_stub_file:
if self.type and self.type.is_protocol and not self.is_func_scope():
                # An overloaded protocol method doesn't need an implementation.
for item in defn.items:
if isinstance(item, Decorator):
item.func.is_abstract = True
else:
item.is_abstract = True
else:
self.fail(
"An overloaded function outside a stub file must have an implementation",
defn)
def process_final_in_overload(self, defn: OverloadedFuncDef) -> None:
"""Detect the @final status of an overloaded function (and perform checks)."""
        # If the implementation is marked as @final (or the first overload in
        # stubs), then the whole overloaded definition is @final.
        if any(item.is_final for item in defn.items):
            # We mark it as final anyway, since that was probably the intention.
defn.is_final = True
# Only show the error once per overload
bad_final = next(ov for ov in defn.items if ov.is_final)
if not self.is_stub_file:
self.fail("@final should be applied only to overload implementation",
bad_final)
elif any(item.is_final for item in defn.items[1:]):
bad_final = next(ov for ov in defn.items[1:] if ov.is_final)
self.fail("In a stub file @final must be applied only to the first overload",
bad_final)
if defn.impl is not None and defn.impl.is_final:
defn.is_final = True
def process_static_or_class_method_in_overload(self, defn: OverloadedFuncDef) -> None:
class_status = []
static_status = []
for item in defn.items:
if isinstance(item, Decorator):
inner = item.func
elif isinstance(item, FuncDef):
inner = item
else:
assert False, "The 'item' variable is an unexpected type: {}".format(type(item))
class_status.append(inner.is_class)
static_status.append(inner.is_static)
if defn.impl is not None:
if isinstance(defn.impl, Decorator):
inner = defn.impl.func
elif isinstance(defn.impl, FuncDef):
inner = defn.impl
else:
assert False, "Unexpected impl type: {}".format(type(defn.impl))
class_status.append(inner.is_class)
static_status.append(inner.is_static)
if len(set(class_status)) != 1:
self.msg.overload_inconsistently_applies_decorator('classmethod', defn)
elif len(set(static_status)) != 1:
self.msg.overload_inconsistently_applies_decorator('staticmethod', defn)
else:
defn.is_class = class_status[0]
defn.is_static = static_status[0]
def analyze_property_with_multi_part_definition(self, defn: OverloadedFuncDef) -> None:
"""Analyze a property defined using multiple methods (e.g., using @x.setter).
Assume that the first method (@property) has already been analyzed.
"""
defn.is_property = True
items = defn.items
first_item = cast(Decorator, defn.items[0])
for item in items[1:]:
if isinstance(item, Decorator) and len(item.decorators) == 1:
node = item.decorators[0]
if isinstance(node, MemberExpr):
if node.name == 'setter':
# The first item represents the entire property.
first_item.var.is_settable_property = True
# Get abstractness from the original definition.
item.func.is_abstract = first_item.func.is_abstract
else:
self.fail("Decorated property not supported", item)
if isinstance(item, Decorator):
item.func.accept(self)
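    # Typical shape handled here (sketch):
    #
    #   class C:
    #       @property
    #       def x(self) -> int: ...
    #       @x.setter
    #       def x(self, value: int) -> None: ...
    #
    # The first item stands in for the whole property; the '.setter' item only
    # marks it as settable.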
def add_function_to_symbol_table(self, func: Union[FuncDef, OverloadedFuncDef]) -> None:
if self.is_class_scope():
assert self.type is not None
func.info = self.type
func._fullname = self.qualified_name(func.name())
self.add_symbol(func.name(), func, func)
def analyze_arg_initializers(self, defn: FuncItem) -> None:
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
# Analyze default arguments
for arg in defn.arguments:
if arg.initializer:
arg.initializer.accept(self)
def analyze_function_body(self, defn: FuncItem) -> None:
is_method = self.is_class_scope()
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
# Bind the type variables again to visit the body.
if defn.type:
a = self.type_analyzer()
a.bind_function_type_variables(cast(CallableType, defn.type), defn)
self.function_stack.append(defn)
self.enter(defn)
for arg in defn.arguments:
self.add_local(arg.variable, defn)
# The first argument of a non-static, non-class method is like 'self'
# (though the name could be different), having the enclosing class's
# instance type.
if is_method and not defn.is_static and not defn.is_class and defn.arguments:
defn.arguments[0].variable.is_self = True
defn.body.accept(self)
self.leave()
self.function_stack.pop()
def check_classvar_in_signature(self, typ: ProperType) -> None:
if isinstance(typ, Overloaded):
for t in typ.items(): # type: ProperType
self.check_classvar_in_signature(t)
return
if not isinstance(typ, CallableType):
return
for t in get_proper_types(typ.arg_types) + [get_proper_type(typ.ret_type)]:
if self.is_classvar(t):
self.fail_invalid_classvar(t)
# Show only one error per signature
break
def check_function_signature(self, fdef: FuncItem) -> None:
sig = fdef.type
assert isinstance(sig, CallableType)
if len(sig.arg_types) < len(fdef.arguments):
self.fail('Type signature has too few arguments', fdef)
# Add dummy Any arguments to prevent crashes later.
num_extra_anys = len(fdef.arguments) - len(sig.arg_types)
extra_anys = [AnyType(TypeOfAny.from_error)] * num_extra_anys
sig.arg_types.extend(extra_anys)
elif len(sig.arg_types) > len(fdef.arguments):
self.fail('Type signature has too many arguments', fdef, blocker=True)
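    # E.g. 'def f(x, y): ...' with the annotation comment '# type: (int) -> None'
    # supplies one argument type for two actual arguments; the signature is
    # padded with Any above so that later phases don't crash.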
def visit_decorator(self, dec: Decorator) -> None:
self.statement = dec
# TODO: better don't modify them at all.
dec.decorators = dec.original_decorators.copy()
dec.func.is_conditional = self.block_depth[-1] > 0
if not dec.is_overload:
self.add_symbol(dec.name(), dec, dec)
dec.func._fullname = self.qualified_name(dec.name())
for d in dec.decorators:
d.accept(self)
removed = [] # type: List[int]
no_type_check = False
for i, d in enumerate(dec.decorators):
# A bunch of decorators are special cased here.
if refers_to_fullname(d, 'abc.abstractmethod'):
removed.append(i)
dec.func.is_abstract = True
self.check_decorated_function_is_method('abstractmethod', dec)
elif (refers_to_fullname(d, 'asyncio.coroutines.coroutine') or
refers_to_fullname(d, 'types.coroutine')):
removed.append(i)
dec.func.is_awaitable_coroutine = True
elif refers_to_fullname(d, 'builtins.staticmethod'):
removed.append(i)
dec.func.is_static = True
dec.var.is_staticmethod = True
self.check_decorated_function_is_method('staticmethod', dec)
elif refers_to_fullname(d, 'builtins.classmethod'):
removed.append(i)
dec.func.is_class = True
dec.var.is_classmethod = True
self.check_decorated_function_is_method('classmethod', dec)
elif (refers_to_fullname(d, 'builtins.property') or
refers_to_fullname(d, 'abc.abstractproperty')):
removed.append(i)
dec.func.is_property = True
dec.var.is_property = True
if refers_to_fullname(d, 'abc.abstractproperty'):
dec.func.is_abstract = True
self.check_decorated_function_is_method('property', dec)
if len(dec.func.arguments) > 1:
self.fail('Too many arguments', dec.func)
elif refers_to_fullname(d, 'typing.no_type_check'):
dec.var.type = AnyType(TypeOfAny.special_form)
no_type_check = True
elif (refers_to_fullname(d, 'typing.final') or
refers_to_fullname(d, 'typing_extensions.final')):
if self.is_class_scope():
assert self.type is not None, "No type set at class scope"
if self.type.is_protocol:
self.msg.protocol_members_cant_be_final(d)
else:
dec.func.is_final = True
dec.var.is_final = True
removed.append(i)
else:
self.fail("@final cannot be used with non-method functions", d)
for i in reversed(removed):
del dec.decorators[i]
if (not dec.is_overload or dec.var.is_property) and self.type:
dec.var.info = self.type
dec.var.is_initialized_in_class = True
if not no_type_check and self.recurse_into_functions:
dec.func.accept(self)
if dec.decorators and dec.var.is_property:
self.fail('Decorated property not supported', dec)
def check_decorated_function_is_method(self, decorator: str,
context: Context) -> None:
if not self.type or self.is_func_scope():
self.fail("'%s' used with a non-method" % decorator, context)
#
# Classes
#
def visit_class_def(self, defn: ClassDef) -> None:
self.statement = defn
with self.tvar_scope_frame(self.tvar_scope.class_frame()):
self.analyze_class(defn)
def analyze_class(self, defn: ClassDef) -> None:
fullname = self.qualified_name(defn.name)
if not defn.info and not self.is_core_builtin_class(defn):
# Add placeholder so that self-references in base classes can be
# resolved. We don't want this to cause a deferral, since if there
# are no incomplete references, we'll replace this with a TypeInfo
# before returning.
placeholder = PlaceholderNode(fullname, defn, defn.line, becomes_typeinfo=True)
self.add_symbol(defn.name, placeholder, defn, can_defer=False)
tag = self.track_incomplete_refs()
# Restore base classes after previous iteration (things like Generic[T] might be removed).
defn.base_type_exprs.extend(defn.removed_base_type_exprs)
defn.removed_base_type_exprs.clear()
self.update_metaclass(defn)
bases = defn.base_type_exprs
bases, tvar_defs, is_protocol = self.clean_up_bases_and_infer_type_variables(defn, bases,
context=defn)
for tvd in tvar_defs:
if any(has_placeholder(t) for t in [tvd.upper_bound] + tvd.values):
# Some type variable bounds or values are not ready, we need
# to re-analyze this class.
self.defer()
self.analyze_class_keywords(defn)
result = self.analyze_base_classes(bases)
if result is None or self.found_incomplete_ref(tag):
# Something was incomplete. Defer current target.
self.mark_incomplete(defn.name, defn)
return
base_types, base_error = result
if any(isinstance(base, PlaceholderType) for base, _ in base_types):
# We need to know the TypeInfo of each base to construct the MRO. Placeholder types
# are okay in nested positions, since they can't affect the MRO.
self.mark_incomplete(defn.name, defn)
return
is_typeddict, info = self.typed_dict_analyzer.analyze_typeddict_classdef(defn)
if is_typeddict:
if info is None:
self.mark_incomplete(defn.name, defn)
else:
self.prepare_class_def(defn, info)
return
if self.analyze_namedtuple_classdef(defn):
return
# Create TypeInfo for class now that base classes and the MRO can be calculated.
self.prepare_class_def(defn)
defn.type_vars = tvar_defs
defn.info.type_vars = [tvar.name for tvar in tvar_defs]
if base_error:
defn.info.fallback_to_any = True
with self.scope.class_scope(defn.info):
self.configure_base_classes(defn, base_types)
defn.info.is_protocol = is_protocol
self.analyze_metaclass(defn)
defn.info.runtime_protocol = False
for decorator in defn.decorators:
self.analyze_class_decorator(defn, decorator)
self.analyze_class_body_common(defn)
def is_core_builtin_class(self, defn: ClassDef) -> bool:
return self.cur_mod_id == 'builtins' and defn.name in CORE_BUILTIN_CLASSES
def analyze_class_body_common(self, defn: ClassDef) -> None:
"""Parts of class body analysis that are common to all kinds of class defs."""
self.enter_class(defn.info)
defn.defs.accept(self)
self.apply_class_plugin_hooks(defn)
self.leave_class()
def analyze_namedtuple_classdef(self, defn: ClassDef) -> bool:
"""Check if this class can define a named tuple."""
if defn.info and defn.info.is_named_tuple:
# Don't reprocess everything. We just need to process methods defined
# in the named tuple class body.
is_named_tuple, info = True, defn.info # type: bool, Optional[TypeInfo]
else:
is_named_tuple, info = self.named_tuple_analyzer.analyze_namedtuple_classdef(defn)
if is_named_tuple:
if info is None:
self.mark_incomplete(defn.name, defn)
else:
self.prepare_class_def(defn, info)
with self.scope.class_scope(defn.info):
with self.named_tuple_analyzer.save_namedtuple_body(info):
self.analyze_class_body_common(defn)
return True
return False
def apply_class_plugin_hooks(self, defn: ClassDef) -> None:
"""Apply a plugin hook that may infer a more precise definition for a class."""
def get_fullname(expr: Expression) -> Optional[str]:
if isinstance(expr, CallExpr):
return get_fullname(expr.callee)
elif isinstance(expr, IndexExpr):
return get_fullname(expr.base)
elif isinstance(expr, RefExpr):
if expr.fullname:
return expr.fullname
# If we don't have a fullname look it up. This happens because base classes are
# analyzed in a different manner (see exprtotype.py) and therefore those AST
# nodes will not have full names.
sym = self.lookup_type_node(expr)
if sym:
return sym.fullname
return None
for decorator in defn.decorators:
decorator_name = get_fullname(decorator)
if decorator_name:
hook = self.plugin.get_class_decorator_hook(decorator_name)
if hook:
hook(ClassDefContext(defn, decorator, self))
if defn.metaclass:
metaclass_name = get_fullname(defn.metaclass)
if metaclass_name:
hook = self.plugin.get_metaclass_hook(metaclass_name)
if hook:
hook(ClassDefContext(defn, defn.metaclass, self))
for base_expr in defn.base_type_exprs:
base_name = get_fullname(base_expr)
if base_name:
hook = self.plugin.get_base_class_hook(base_name)
if hook:
hook(ClassDefContext(defn, base_expr, self))
def analyze_class_keywords(self, defn: ClassDef) -> None:
for value in defn.keywords.values():
value.accept(self)
def enter_class(self, info: TypeInfo) -> None:
# Remember previous active class
self.type_stack.append(self.type)
self.locals.append(None) # Add class scope
self.is_comprehension_stack.append(False)
self.block_depth.append(-1) # The class body increments this to 0
self.type = info
def leave_class(self) -> None:
""" Restore analyzer state. """
self.block_depth.pop()
self.locals.pop()
self.is_comprehension_stack.pop()
self.type = self.type_stack.pop()
def analyze_class_decorator(self, defn: ClassDef, decorator: Expression) -> None:
decorator.accept(self)
if isinstance(decorator, RefExpr):
if decorator.fullname in RUNTIME_PROTOCOL_DECOS:
if defn.info.is_protocol:
defn.info.runtime_protocol = True
else:
self.fail('@runtime_checkable can only be used with protocol classes',
defn)
elif decorator.fullname in ('typing.final',
'typing_extensions.final'):
defn.info.is_final = True
def clean_up_bases_and_infer_type_variables(
self,
defn: ClassDef,
base_type_exprs: List[Expression],
context: Context) -> Tuple[List[Expression],
List[TypeVarDef],
bool]:
"""Remove extra base classes such as Generic and infer type vars.
For example, consider this class:
class Foo(Bar, Generic[T]): ...
Now we will remove Generic[T] from bases of Foo and infer that the
type variable 'T' is a type argument of Foo.
Note that this is performed *before* semantic analysis.
Returns (remaining base expressions, inferred type variables, is protocol).
"""
removed = [] # type: List[int]
declared_tvars = [] # type: TypeVarList
is_protocol = False
for i, base_expr in enumerate(base_type_exprs):
self.analyze_type_expr(base_expr)
try:
base = expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
# This error will be caught later.
continue
result = self.analyze_class_typevar_declaration(base)
if result is not None:
if declared_tvars:
self.fail('Only single Generic[...] or Protocol[...] can be in bases', context)
removed.append(i)
tvars = result[0]
is_protocol |= result[1]
declared_tvars.extend(tvars)
if isinstance(base, UnboundType):
sym = self.lookup_qualified(base.name, base)
if sym is not None and sym.node is not None:
if (sym.node.fullname() in ('typing.Protocol',
'typing_extensions.Protocol') and
i not in removed):
# also remove bare 'Protocol' bases
removed.append(i)
is_protocol = True
all_tvars = self.get_all_bases_tvars(base_type_exprs, removed)
if declared_tvars:
if len(remove_dups(declared_tvars)) < len(declared_tvars):
self.fail("Duplicate type variables in Generic[...] or Protocol[...]", context)
declared_tvars = remove_dups(declared_tvars)
if not set(all_tvars).issubset(set(declared_tvars)):
self.fail("If Generic[...] or Protocol[...] is present"
" it should list all type variables", context)
# In case of error, Generic tvars will go first
declared_tvars = remove_dups(declared_tvars + all_tvars)
else:
declared_tvars = all_tvars
for i in reversed(removed):
# We need to actually remove the base class expressions like Generic[T],
# mostly because otherwise they will create spurious dependencies in fine
# grained incremental mode.
defn.removed_base_type_exprs.append(defn.base_type_exprs[i])
del base_type_exprs[i]
tvar_defs = [] # type: List[TypeVarDef]
for name, tvar_expr in declared_tvars:
tvar_def = self.tvar_scope.bind_new(name, tvar_expr)
tvar_defs.append(tvar_def)
return base_type_exprs, tvar_defs, is_protocol
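    # E.g. for 'class C(Base, Protocol[T]): ...' this returns the remaining
    # bases [Base], a TypeVarDef for T, and is_protocol == True; the stripped
    # 'Protocol[T]' expression is kept in defn.removed_base_type_exprs.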
def analyze_class_typevar_declaration(self, base: Type) -> Optional[Tuple[TypeVarList, bool]]:
"""Analyze type variables declared using Generic[...] or Protocol[...].
Args:
base: Non-analyzed base class
Return None if the base class does not declare type variables. Otherwise,
return the type variables.
"""
if not isinstance(base, UnboundType):
return None
unbound = base
sym = self.lookup_qualified(unbound.name, unbound)
if sym is None or sym.node is None:
return None
if (sym.node.fullname() == 'typing.Generic' or
sym.node.fullname() == 'typing.Protocol' and base.args or
sym.node.fullname() == 'typing_extensions.Protocol' and base.args):
is_proto = sym.node.fullname() != 'typing.Generic'
tvars = [] # type: TypeVarList
for arg in unbound.args:
tag = self.track_incomplete_refs()
tvar = self.analyze_unbound_tvar(arg)
if tvar:
tvars.append(tvar)
elif not self.found_incomplete_ref(tag):
self.fail('Free type variable expected in %s[...]' %
sym.node.name(), base)
return tvars, is_proto
return None
def analyze_unbound_tvar(self, t: Type) -> Optional[Tuple[str, TypeVarExpr]]:
if not isinstance(t, UnboundType):
return None
unbound = t
sym = self.lookup_qualified(unbound.name, unbound)
if sym and isinstance(sym.node, PlaceholderNode):
self.record_incomplete_ref()
if sym is None or not isinstance(sym.node, TypeVarExpr):
return None
elif sym.fullname and not self.tvar_scope.allow_binding(sym.fullname):
# It's bound by our type variable scope
return None
else:
assert isinstance(sym.node, TypeVarExpr)
return unbound.name, sym.node
def get_all_bases_tvars(self,
base_type_exprs: List[Expression],
removed: List[int]) -> TypeVarList:
"""Return all type variable references in bases."""
tvars = [] # type: TypeVarList
for i, base_expr in enumerate(base_type_exprs):
if i not in removed:
try:
base = expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
# This error will be caught later.
continue
base_tvars = base.accept(TypeVariableQuery(self.lookup_qualified, self.tvar_scope))
tvars.extend(base_tvars)
return remove_dups(tvars)
def prepare_class_def(self, defn: ClassDef, info: Optional[TypeInfo] = None) -> None:
"""Prepare for the analysis of a class definition.
Create an empty TypeInfo and store it in a symbol table, or if the 'info'
argument is provided, store it instead (used for magic type definitions).
"""
if not defn.info:
defn.fullname = self.qualified_name(defn.name)
# TODO: Nested classes
info = info or self.make_empty_type_info(defn)
defn.info = info
info.defn = defn
if not self.is_func_scope():
info._fullname = self.qualified_name(defn.name)
else:
info._fullname = info.name()
self.add_symbol(defn.name, defn.info, defn)
if self.is_nested_within_func_scope():
            # We need to preserve local classes, so let's store them
            # in globals under mangled unique names
#
# TODO: Putting local classes into globals breaks assumptions in fine-grained
# incremental mode and we should avoid it. In general, this logic is too
# ad-hoc and needs to be removed/refactored.
if '@' not in defn.info._fullname:
local_name = defn.info._fullname + '@' + str(defn.line)
if defn.info.is_named_tuple:
# Module is already correctly set in _fullname for named tuples.
defn.info._fullname += '@' + str(defn.line)
else:
defn.info._fullname = self.cur_mod_id + '.' + local_name
else:
# Preserve name from previous fine-grained incremental run.
local_name = defn.info._fullname
defn.fullname = defn.info._fullname
self.globals[local_name] = SymbolTableNode(GDEF, defn.info)
def make_empty_type_info(self, defn: ClassDef) -> TypeInfo:
if (self.is_module_scope()
and self.cur_mod_id == 'builtins'
and defn.name in CORE_BUILTIN_CLASSES):
# Special case core built-in classes. A TypeInfo was already
# created for it before semantic analysis, but with a dummy
# ClassDef. Patch the real ClassDef object.
info = self.globals[defn.name].node
assert isinstance(info, TypeInfo)
else:
info = TypeInfo(SymbolTable(), defn, self.cur_mod_id)
info.set_line(defn)
return info
def get_name_repr_of_expr(self, expr: Expression) -> Optional[str]:
"""Try finding a short simplified textual representation of a base class expression."""
if isinstance(expr, NameExpr):
return expr.name
if isinstance(expr, MemberExpr):
return get_member_expr_fullname(expr)
if isinstance(expr, IndexExpr):
return self.get_name_repr_of_expr(expr.base)
if isinstance(expr, CallExpr):
return self.get_name_repr_of_expr(expr.callee)
return None
def analyze_base_classes(
self,
base_type_exprs: List[Expression]) -> Optional[Tuple[List[Tuple[ProperType,
Expression]],
bool]]:
"""Analyze base class types.
Return None if some definition was incomplete. Otherwise, return a tuple
with these items:
* List of (analyzed type, original expression) tuples
* Boolean indicating whether one of the bases had a semantic analysis error
"""
is_error = False
bases = []
for base_expr in base_type_exprs:
if (isinstance(base_expr, RefExpr) and
base_expr.fullname in ('typing.NamedTuple',) + TPDICT_NAMES):
# Ignore magic bases for now.
continue
try:
base = self.expr_to_analyzed_type(base_expr, allow_placeholder=True)
except TypeTranslationError:
name = self.get_name_repr_of_expr(base_expr)
if isinstance(base_expr, CallExpr):
msg = 'Unsupported dynamic base class'
else:
msg = 'Invalid base class'
if name:
msg += ' "{}"'.format(name)
self.fail(msg, base_expr)
is_error = True
continue
if base is None:
return None
base = get_proper_type(base)
bases.append((base, base_expr))
return bases, is_error
def configure_base_classes(self,
defn: ClassDef,
bases: List[Tuple[ProperType, Expression]]) -> None:
"""Set up base classes.
This computes several attributes on the corresponding TypeInfo defn.info
related to the base classes: defn.info.bases, defn.info.mro, and
        miscellaneous others (at least tuple_type, fallback_to_any, and is_enum).
"""
base_types = [] # type: List[Instance]
info = defn.info
info.tuple_type = None
for base, base_expr in bases:
if isinstance(base, TupleType):
actual_base = self.configure_tuple_base_class(defn, base, base_expr)
base_types.append(actual_base)
elif isinstance(base, Instance):
if base.type.is_newtype:
self.fail("Cannot subclass NewType", defn)
base_types.append(base)
elif isinstance(base, AnyType):
if self.options.disallow_subclassing_any:
if isinstance(base_expr, (NameExpr, MemberExpr)):
msg = "Class cannot subclass '{}' (has type 'Any')".format(base_expr.name)
else:
msg = "Class cannot subclass value of type 'Any'"
self.fail(msg, base_expr)
info.fallback_to_any = True
else:
msg = 'Invalid base class'
name = self.get_name_repr_of_expr(base_expr)
if name:
msg += ' "{}"'.format(name)
self.fail(msg, base_expr)
info.fallback_to_any = True
if self.options.disallow_any_unimported and has_any_from_unimported_type(base):
if isinstance(base_expr, (NameExpr, MemberExpr)):
prefix = "Base type {}".format(base_expr.name)
else:
prefix = "Base type"
self.msg.unimported_type_becomes_any(prefix, base, base_expr)
check_for_explicit_any(base, self.options, self.is_typeshed_stub_file, self.msg,
context=base_expr)
# Add 'object' as implicit base if there is no other base class.
if not base_types and defn.fullname != 'builtins.object':
base_types.append(self.object_type())
info.bases = base_types
# Calculate the MRO.
if not self.verify_base_classes(defn):
# Give it an MRO consisting of just the class itself and object.
defn.info.mro = [defn.info, self.object_type().type]
return
self.calculate_class_mro(defn, self.object_type)
def configure_tuple_base_class(self,
defn: ClassDef,
base: TupleType,
base_expr: Expression) -> Instance:
info = defn.info
# There may be an existing valid tuple type from previous semanal iterations.
# Use equality to check if it is the case.
if info.tuple_type and info.tuple_type != base:
self.fail("Class has two incompatible bases derived from tuple", defn)
defn.has_incompatible_baseclass = True
info.tuple_type = base
if isinstance(base_expr, CallExpr):
defn.analyzed = NamedTupleExpr(base.partial_fallback.type)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
if base.partial_fallback.type.fullname() == 'builtins.tuple':
# Fallback can only be safely calculated after semantic analysis, since base
# classes may be incomplete. Postpone the calculation.
self.schedule_patch(PRIORITY_FALLBACKS, lambda: calculate_tuple_fallback(base))
return base.partial_fallback
def calculate_class_mro(self, defn: ClassDef,
obj_type: Optional[Callable[[], Instance]] = None) -> None:
"""Calculate method resolution order for a class.
`obj_type` may be omitted in the third pass when all classes are already analyzed.
        It exists just to fill in the empty base class list during the second pass
        in case of an import cycle.
"""
try:
calculate_mro(defn.info, obj_type)
except MroError:
self.fail_blocker('Cannot determine consistent method resolution '
'order (MRO) for "%s"' % defn.name, defn)
defn.info.mro = []
# Allow plugins to alter the MRO to handle the fact that `def mro()`
# on metaclasses permits MRO rewriting.
if defn.fullname:
hook = self.plugin.get_customize_class_mro_hook(defn.fullname)
if hook:
hook(ClassDefContext(defn, FakeExpression(), self))
def update_metaclass(self, defn: ClassDef) -> None:
"""Lookup for special metaclass declarations, and update defn fields accordingly.
* __metaclass__ attribute in Python 2
* six.with_metaclass(M, B1, B2, ...)
* @six.add_metaclass(M)
"""
# Look for "__metaclass__ = <metaclass>" in Python 2
python2_meta_expr = None # type: Optional[Expression]
if self.options.python_version[0] == 2:
for body_node in defn.defs.body:
if isinstance(body_node, ClassDef) and body_node.name == "__metaclass__":
self.fail("Metaclasses defined as inner classes are not supported", body_node)
break
elif isinstance(body_node, AssignmentStmt) and len(body_node.lvalues) == 1:
lvalue = body_node.lvalues[0]
if isinstance(lvalue, NameExpr) and lvalue.name == "__metaclass__":
python2_meta_expr = body_node.rvalue
# Look for six.with_metaclass(M, B1, B2, ...)
with_meta_expr = None # type: Optional[Expression]
if len(defn.base_type_exprs) == 1:
base_expr = defn.base_type_exprs[0]
if isinstance(base_expr, CallExpr) and isinstance(base_expr.callee, RefExpr):
base_expr.callee.accept(self)
if (base_expr.callee.fullname == 'six.with_metaclass'
and len(base_expr.args) >= 1
and all(kind == ARG_POS for kind in base_expr.arg_kinds)):
with_meta_expr = base_expr.args[0]
defn.base_type_exprs = base_expr.args[1:]
# Look for @six.add_metaclass(M)
add_meta_expr = None # type: Optional[Expression]
for dec_expr in defn.decorators:
if isinstance(dec_expr, CallExpr) and isinstance(dec_expr.callee, RefExpr):
dec_expr.callee.accept(self)
if (dec_expr.callee.fullname == 'six.add_metaclass'
and len(dec_expr.args) == 1
and dec_expr.arg_kinds[0] == ARG_POS):
add_meta_expr = dec_expr.args[0]
break
metas = {defn.metaclass, python2_meta_expr, with_meta_expr, add_meta_expr} - {None}
if len(metas) == 0:
return
if len(metas) > 1:
self.fail("Multiple metaclass definitions", defn)
return
defn.metaclass = metas.pop()
def verify_base_classes(self, defn: ClassDef) -> bool:
info = defn.info
for base in info.bases:
baseinfo = base.type
if self.is_base_class(info, baseinfo):
self.fail('Cycle in inheritance hierarchy', defn, blocker=True)
# Clear bases to forcefully get rid of the cycle.
info.bases = []
if baseinfo.fullname() == 'builtins.bool':
self.fail("'%s' is not a valid base class" %
baseinfo.name(), defn, blocker=True)
return False
dup = find_duplicate(info.direct_base_classes())
if dup:
self.fail('Duplicate base class "%s"' % dup.name(), defn, blocker=True)
return False
return True
def is_base_class(self, t: TypeInfo, s: TypeInfo) -> bool:
"""Determine if t is a base class of s (but do not use mro)."""
# Search the base class graph for t, starting from s.
worklist = [s]
visited = {s}
while worklist:
nxt = worklist.pop()
if nxt == t:
return True
for base in nxt.bases:
if base.type not in visited:
worklist.append(base.type)
visited.add(base.type)
return False
def analyze_metaclass(self, defn: ClassDef) -> None:
if defn.metaclass:
metaclass_name = None
if isinstance(defn.metaclass, NameExpr):
metaclass_name = defn.metaclass.name
elif isinstance(defn.metaclass, MemberExpr):
metaclass_name = get_member_expr_fullname(defn.metaclass)
if metaclass_name is None:
self.fail("Dynamic metaclass not supported for '%s'" % defn.name, defn.metaclass)
return
sym = self.lookup_qualified(metaclass_name, defn.metaclass)
if sym is None:
# Probably a name error - it is already handled elsewhere
return
if isinstance(sym.node, Var) and isinstance(get_proper_type(sym.node.type), AnyType):
# 'Any' metaclass -- just ignore it.
#
# TODO: A better approach would be to record this information
# and assume that the type object supports arbitrary
# attributes, similar to an 'Any' base class.
return
if isinstance(sym.node, PlaceholderNode):
self.defer(defn)
return
if not isinstance(sym.node, TypeInfo) or sym.node.tuple_type is not None:
self.fail("Invalid metaclass '%s'" % metaclass_name, defn.metaclass)
return
if not sym.node.is_metaclass():
self.fail("Metaclasses not inheriting from 'type' are not supported",
defn.metaclass)
return
inst = fill_typevars(sym.node)
assert isinstance(inst, Instance)
defn.info.declared_metaclass = inst
defn.info.metaclass_type = defn.info.calculate_metaclass_type()
if any(info.is_protocol for info in defn.info.mro):
if (not defn.info.metaclass_type or
defn.info.metaclass_type.type.fullname() == 'builtins.type'):
# All protocols and their subclasses have ABCMeta metaclass by default.
# TODO: add a metaclass conflict check if there is another metaclass.
abc_meta = self.named_type_or_none('abc.ABCMeta', [])
if abc_meta is not None: # May be None in tests with incomplete lib-stub.
defn.info.metaclass_type = abc_meta
if defn.info.metaclass_type is None:
# Inconsistency may happen due to multiple baseclasses even in classes that
# do not declare explicit metaclass, but it's harder to catch at this stage
if defn.metaclass is not None:
self.fail("Inconsistent metaclass structure for '%s'" % defn.name, defn)
else:
if defn.info.metaclass_type.type.has_base('enum.EnumMeta'):
defn.info.is_enum = True
if defn.type_vars:
self.fail("Enum class cannot be generic", defn)
#
# Imports
#
def visit_import(self, i: Import) -> None:
self.statement = i
for id, as_id in i.ids:
if as_id is not None:
self.add_module_symbol(id, as_id, module_public=True, context=i)
else:
# Modules imported in a stub file without using 'as x' won't get exported
# When implicit re-exporting is disabled, we have the same behavior as stubs.
module_public = (
not self.is_stub_file
and self.options.implicit_reexport
)
base = id.split('.')[0]
self.add_module_symbol(base, base, module_public=module_public,
context=i, module_hidden=not module_public)
def visit_import_from(self, imp: ImportFrom) -> None:
self.statement = imp
module_id = self.correct_relative_import(imp)
module = self.modules.get(module_id)
for id, as_id in imp.names:
fullname = module_id + '.' + id
if module is None:
node = None
elif module_id == self.cur_mod_id and fullname in self.modules:
                # Submodule takes precedence over definition in surrounding package, for
# compatibility with runtime semantics in typical use cases. This
# could more precisely model runtime semantics by taking into account
# the line number beyond which the local definition should take
# precedence, but doesn't seem to be important in most use cases.
node = SymbolTableNode(GDEF, self.modules[fullname])
else:
node = module.names.get(id)
missing_submodule = False
imported_id = as_id or id
# If the module does not contain a symbol with the name 'id',
# try checking if it's a module instead.
if not node:
mod = self.modules.get(fullname)
if mod is not None:
kind = self.current_symbol_kind()
node = SymbolTableNode(kind, mod)
elif fullname in self.missing_modules:
missing_submodule = True
# If it is still not resolved, check for a module level __getattr__
if (module and not node and (module.is_stub or self.options.python_version >= (3, 7))
and '__getattr__' in module.names):
# We store the fullname of the original definition so that we can
# detect whether two imported names refer to the same thing.
fullname = module_id + '.' + id
gvar = self.create_getattr_var(module.names['__getattr__'], imported_id, fullname)
if gvar:
self.add_symbol(imported_id, gvar, imp)
continue
if node and not node.module_hidden:
self.process_imported_symbol(node, module_id, id, as_id, fullname, imp)
elif module and not missing_submodule:
# Target module exists but the imported name is missing or hidden.
self.report_missing_module_attribute(module_id, id, imported_id, imp)
else:
# Import of a missing (sub)module.
self.add_unknown_imported_symbol(imported_id, imp, target_name=fullname)
def process_imported_symbol(self,
node: SymbolTableNode,
module_id: str,
id: str,
as_id: Optional[str],
fullname: str,
context: ImportBase) -> None:
imported_id = as_id or id
if isinstance(node.node, PlaceholderNode):
if self.final_iteration:
self.report_missing_module_attribute(module_id, id, imported_id, context)
return
else:
# This might become a type.
self.mark_incomplete(imported_id, node.node, becomes_typeinfo=True)
existing_symbol = self.globals.get(imported_id)
if (existing_symbol and not isinstance(existing_symbol.node, PlaceholderNode) and
not isinstance(node.node, PlaceholderNode)):
# Import can redefine a variable. They get special treatment.
if self.process_import_over_existing_name(
imported_id, existing_symbol, node, context):
return
if existing_symbol and isinstance(node.node, PlaceholderNode):
# Imports are special, some redefinitions are allowed, so wait until
# we know what is the new symbol node.
return
# 'from m import x as x' exports x in a stub file or when implicit
# re-exports are disabled.
module_public = (
not self.is_stub_file
and self.options.implicit_reexport
or as_id is not None
)
module_hidden = not module_public and fullname not in self.modules
# NOTE: we take the original node even for final `Var`s. This is to support
# a common pattern when constants are re-exported (same applies to import *).
self.add_imported_symbol(imported_id, node, context,
module_public=module_public,
module_hidden=module_hidden)
def report_missing_module_attribute(self, import_id: str, source_id: str, imported_id: str,
context: Node) -> None:
# Missing attribute.
if self.is_incomplete_namespace(import_id):
# We don't know whether the name will be there, since the namespace
# is incomplete. Defer the current target.
self.mark_incomplete(imported_id, context)
return
message = "Module '{}' has no attribute '{}'".format(import_id, source_id)
# Suggest alternatives, if any match is found.
module = self.modules.get(import_id)
if module:
alternatives = set(module.names.keys()).difference({source_id})
matches = best_matches(source_id, alternatives)[:3]
if matches:
suggestion = "; maybe {}?".format(pretty_or(matches))
message += "{}".format(suggestion)
self.fail(message, context, code=codes.ATTR_DEFINED)
self.add_unknown_imported_symbol(imported_id, context)
if import_id == 'typing':
# The user probably has a missing definition in a test fixture. Let's verify.
fullname = 'builtins.{}'.format(source_id.lower())
if (self.lookup_fully_qualified_or_none(fullname) is None and
fullname in SUGGESTED_TEST_FIXTURES):
# Yes. Generate a helpful note.
self.add_fixture_note(fullname, context)
def process_import_over_existing_name(self,
imported_id: str, existing_symbol: SymbolTableNode,
module_symbol: SymbolTableNode,
import_node: ImportBase) -> bool:
if existing_symbol.node is module_symbol.node:
# We added this symbol on previous iteration.
return False
if (existing_symbol.kind in (LDEF, GDEF, MDEF) and
isinstance(existing_symbol.node, (Var, FuncDef, TypeInfo, Decorator, TypeAlias))):
# This is a valid import over an existing definition in the file. Construct a dummy
# assignment that we'll use to type check the import.
lvalue = NameExpr(imported_id)
lvalue.kind = existing_symbol.kind
lvalue.node = existing_symbol.node
rvalue = NameExpr(imported_id)
rvalue.kind = module_symbol.kind
rvalue.node = module_symbol.node
if isinstance(rvalue.node, TypeAlias):
# Suppress bogus errors from the dummy assignment if rvalue is an alias.
# Otherwise mypy may complain that alias is invalid in runtime context.
rvalue.is_alias_rvalue = True
assignment = AssignmentStmt([lvalue], rvalue)
for node in assignment, lvalue, rvalue:
node.set_line(import_node)
import_node.assignments.append(assignment)
return True
return False
def add_fixture_note(self, fullname: str, ctx: Context) -> None:
self.note('Maybe your test fixture does not define "{}"?'.format(fullname), ctx)
if fullname in SUGGESTED_TEST_FIXTURES:
self.note(
'Consider adding [builtins fixtures/{}] to your test description'.format(
SUGGESTED_TEST_FIXTURES[fullname]), ctx)
def correct_relative_import(self, node: Union[ImportFrom, ImportAll]) -> str:
import_id, ok = correct_relative_import(self.cur_mod_id, node.relative, node.id,
self.cur_mod_node.is_package_init_file())
if not ok:
self.fail("Relative import climbs too many namespaces", node)
return import_id
def visit_import_all(self, i: ImportAll) -> None:
i_id = self.correct_relative_import(i)
if i_id in self.modules:
m = self.modules[i_id]
if self.is_incomplete_namespace(i_id):
# Any names could be missing from the current namespace if the target module
# namespace is incomplete.
self.mark_incomplete('*', i)
for name, node in m.names.items():
if node is None:
continue
# if '__all__' exists, all nodes not included have had module_public set to
# False, and we can skip checking '_' because it's been explicitly included.
if node.module_public and (not name.startswith('_') or '__all__' in m.names):
if isinstance(node.node, MypyFile):
# Star import of submodule from a package, add it as a dependency.
self.imports.add(node.node.fullname())
existing_symbol = self.lookup_current_scope(name)
if existing_symbol and not isinstance(node.node, PlaceholderNode):
# Import can redefine a variable. They get special treatment.
if self.process_import_over_existing_name(
name, existing_symbol, node, i):
continue
# In stub files, `from x import *` always reexports the symbols.
# In regular files, only if implicit reexports are enabled.
module_public = self.is_stub_file or self.options.implicit_reexport
self.add_imported_symbol(name, node, i,
module_public=module_public,
module_hidden=not module_public)
else:
# Don't add any dummy symbols for 'from x import *' if 'x' is unknown.
pass
#
# Assignment
#
def visit_assignment_expr(self, s: AssignmentExpr) -> None:
s.value.accept(self)
self.analyze_lvalue(s.target, escape_comprehensions=True)
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
self.statement = s
# Special case assignment like X = X.
if self.analyze_identity_global_assignment(s):
return
tag = self.track_incomplete_refs()
s.rvalue.accept(self)
if self.found_incomplete_ref(tag) or self.should_wait_rhs(s.rvalue):
# Initializer couldn't be fully analyzed. Defer the current node and give up.
# Make sure that if we skip the definition of some local names, they can't be
# added later in this scope, since an earlier definition should take precedence.
for expr in names_modified_by_assignment(s):
self.mark_incomplete(expr.name, expr)
return
        # The r.h.s. is now ready to be classified; first check whether it is a special form:
special_form = False
# * type alias
if self.check_and_set_up_type_alias(s):
s.is_alias_def = True
special_form = True
# * type variable definition
elif self.process_typevar_declaration(s):
special_form = True
# * type constructors
elif self.analyze_namedtuple_assign(s):
special_form = True
elif self.analyze_typeddict_assign(s):
special_form = True
elif self.newtype_analyzer.process_newtype_declaration(s):
special_form = True
elif self.analyze_enum_assign(s):
special_form = True
if special_form:
self.record_special_form_lvalue(s)
return
# OK, this is a regular assignment, perform the necessary analysis steps.
s.is_final_def = self.unwrap_final(s)
self.analyze_lvalues(s)
self.check_final_implicit_def(s)
self.check_classvar(s)
self.process_type_annotation(s)
self.apply_dynamic_class_hook(s)
self.store_final_status(s)
if not s.type:
self.process_module_assignment(s.lvalues, s.rvalue, s)
self.process__all__(s)
def analyze_identity_global_assignment(self, s: AssignmentStmt) -> bool:
"""Special case 'X = X' in global scope.
This allows supporting some important use cases.
Return true if special casing was applied.
"""
if not isinstance(s.rvalue, NameExpr) or len(s.lvalues) != 1:
# Not of form 'X = X'
return False
lvalue = s.lvalues[0]
if not isinstance(lvalue, NameExpr) or s.rvalue.name != lvalue.name:
# Not of form 'X = X'
return False
if self.type is not None or self.is_func_scope():
# Not in global scope
return False
# It's an assignment like 'X = X' in the global scope.
name = lvalue.name
sym = self.lookup(name, s)
if sym is None:
if self.final_iteration:
# Fall back to normal assignment analysis.
return False
else:
self.defer()
return True
else:
if sym.node is None:
# Something special -- fall back to normal assignment analysis.
return False
if name not in self.globals:
# The name is from builtins. Add an alias to the current module.
self.add_symbol(name, sym.node, s)
if not isinstance(sym.node, PlaceholderNode):
for node in s.rvalue, lvalue:
node.node = sym.node
node.kind = GDEF
node.fullname = sym.node.fullname()
return True
def should_wait_rhs(self, rv: Expression) -> bool:
"""Can we already classify this r.h.s. of an assignment or should we wait?
This returns True if we don't have enough information to decide whether
an assignment is just a normal variable definition or a special form.
Always return False if this is a final iteration. This will typically cause
        the lvalue to be classified as a variable and an error to be emitted.
"""
if self.final_iteration:
# No chance, nothing has changed.
return False
if isinstance(rv, NameExpr):
n = self.lookup(rv.name, rv)
if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
return True
elif isinstance(rv, MemberExpr):
fname = get_member_expr_fullname(rv)
if fname:
n = self.lookup_qualified(fname, rv, suppress_errors=True)
if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
return True
elif isinstance(rv, IndexExpr) and isinstance(rv.base, RefExpr):
return self.should_wait_rhs(rv.base)
elif isinstance(rv, CallExpr) and isinstance(rv.callee, RefExpr):
# This is only relevant for builtin SCC where things like 'TypeVar'
# may be not ready.
return self.should_wait_rhs(rv.callee)
return False
def can_be_type_alias(self, rv: Expression) -> bool:
"""Is this a valid r.h.s. for an alias definition?
        Note: this function should only be called for expressions where self.should_wait_rhs()
returns False.
"""
if isinstance(rv, RefExpr) and self.is_type_ref(rv, bare=True):
return True
if isinstance(rv, IndexExpr) and self.is_type_ref(rv.base, bare=False):
return True
if self.is_none_alias(rv):
return True
return False
def is_type_ref(self, rv: Expression, bare: bool = False) -> bool:
"""Does this expression refer to a type?
This includes:
* Special forms, like Any or Union
* Classes (except subscripted enums)
* Other type aliases
* PlaceholderNodes with becomes_typeinfo=True (these can be not ready class
definitions, and not ready aliases).
If bare is True, this is not a base of an index expression, so some special
forms are not valid (like a bare Union).
        Note: This method should only be used in the context of a type alias
        definition. It can only return True for RefExprs; to check whether C[int]
        is a valid target for a type alias, call this method on expr.base (i.e. on
        C in C[int]).
See also can_be_type_alias().
"""
if not isinstance(rv, RefExpr):
return False
if isinstance(rv.node, TypeVarExpr):
self.fail('Type variable "{}" is invalid as target for type alias'.format(
rv.fullname), rv)
return False
if bare:
# These three are valid even if bare, for example
# A = Tuple is just equivalent to A = Tuple[Any, ...].
valid_refs = {'typing.Any', 'typing.Tuple', 'typing.Callable'}
else:
valid_refs = type_constructors
if isinstance(rv.node, TypeAlias) or rv.fullname in valid_refs:
return True
if isinstance(rv.node, TypeInfo):
if bare:
return True
# Assignment color = Color['RED'] defines a variable, not an alias.
return not rv.node.is_enum
if isinstance(rv, NameExpr):
n = self.lookup(rv.name, rv)
if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
return True
elif isinstance(rv, MemberExpr):
fname = get_member_expr_fullname(rv)
if fname:
# The r.h.s. for variable definitions may not be a type reference but just
# an instance attribute, so suppress the errors.
n = self.lookup_qualified(fname, rv, suppress_errors=True)
if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
return True
return False
def is_none_alias(self, node: Expression) -> bool:
"""Is this a r.h.s. for a None alias?
We special case the assignments like Void = type(None), to allow using
Void in type annotations.
"""
if isinstance(node, CallExpr):
if (isinstance(node.callee, NameExpr) and len(node.args) == 1 and
isinstance(node.args[0], NameExpr)):
call = self.lookup_qualified(node.callee.name, node.callee)
arg = self.lookup_qualified(node.args[0].name, node.args[0])
if (call is not None and call.node and call.node.fullname() == 'builtins.type' and
arg is not None and arg.node and arg.node.fullname() == 'builtins.None'):
return True
return False
def record_special_form_lvalue(self, s: AssignmentStmt) -> None:
"""Record minimal necessary information about l.h.s. of a special form.
This exists mostly for compatibility with the old semantic analyzer.
"""
lvalue = s.lvalues[0]
assert isinstance(lvalue, NameExpr)
lvalue.is_special_form = True
if self.current_symbol_kind() == GDEF:
lvalue.fullname = self.qualified_name(lvalue.name)
lvalue.kind = self.current_symbol_kind()
def analyze_enum_assign(self, s: AssignmentStmt) -> bool:
"""Check if s defines an Enum."""
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, EnumCallExpr):
# Already analyzed enum -- nothing to do here.
return True
return self.enum_call_analyzer.process_enum_call(s, self.is_func_scope())
def analyze_namedtuple_assign(self, s: AssignmentStmt) -> bool:
"""Check if s defines a namedtuple."""
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, NamedTupleExpr):
return True # This is a valid and analyzed named tuple definition, nothing to do here.
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return False
lvalue = s.lvalues[0]
name = lvalue.name
is_named_tuple, info = self.named_tuple_analyzer.check_namedtuple(s.rvalue, name,
self.is_func_scope())
if not is_named_tuple:
return False
# Yes, it's a valid namedtuple, but defer if it is not ready.
if not info:
self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
return True
def analyze_typeddict_assign(self, s: AssignmentStmt) -> bool:
"""Check if s defines a typed dict."""
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, TypedDictExpr):
return True # This is a valid and analyzed typed dict definition, nothing to do here.
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return False
lvalue = s.lvalues[0]
name = lvalue.name
is_typed_dict, info = self.typed_dict_analyzer.check_typeddict(s.rvalue, name,
self.is_func_scope())
if not is_typed_dict:
return False
# Yes, it's a valid typed dict, but defer if it is not ready.
if not info:
self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
return True
def analyze_lvalues(self, s: AssignmentStmt) -> None:
# We cannot use s.type, because analyze_simple_literal_type() will set it.
explicit = s.unanalyzed_type is not None
if self.is_final_type(s.unanalyzed_type):
# We need to exclude bare Final.
assert isinstance(s.unanalyzed_type, UnboundType)
if not s.unanalyzed_type.args:
explicit = False
for lval in s.lvalues:
self.analyze_lvalue(lval,
explicit_type=explicit,
is_final=s.is_final_def)
def apply_dynamic_class_hook(self, s: AssignmentStmt) -> None:
if len(s.lvalues) > 1:
return
lval = s.lvalues[0]
if not isinstance(lval, NameExpr) or not isinstance(s.rvalue, CallExpr):
return
call = s.rvalue
if not isinstance(call.callee, RefExpr):
return
fname = call.callee.fullname
if fname:
hook = self.plugin.get_dynamic_class_hook(fname)
if hook:
hook(DynamicClassDefContext(call, lval.name, self))
def unwrap_final(self, s: AssignmentStmt) -> bool:
"""Strip Final[...] if present in an assignment.
        This is done to invoke type inference during the type checking phase for
        this assignment. Also, Final[...] doesn't affect the type in any way -- it
        is rather an access qualifier for the given `Var`.
Also perform various consistency checks.
Returns True if Final[...] was present.
"""
if not s.unanalyzed_type or not self.is_final_type(s.unanalyzed_type):
return False
assert isinstance(s.unanalyzed_type, UnboundType)
if len(s.unanalyzed_type.args) > 1:
self.fail("Final[...] takes at most one type argument", s.unanalyzed_type)
invalid_bare_final = False
if not s.unanalyzed_type.args:
s.type = None
if isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs:
invalid_bare_final = True
self.fail("Type in Final[...] can only be omitted if there is an initializer", s)
else:
s.type = s.unanalyzed_type.args[0]
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], RefExpr):
self.fail("Invalid final declaration", s)
return False
lval = s.lvalues[0]
assert isinstance(lval, RefExpr)
# Reset inferred status if it was set due to simple literal rvalue on previous iteration.
# TODO: this is a best-effort quick fix, we should avoid the need to manually sync this,
# see https://github.com/python/mypy/issues/6458.
if lval.is_new_def:
lval.is_inferred_def = s.type is None
if self.loop_depth > 0:
self.fail("Cannot use Final inside a loop", s)
if self.type and self.type.is_protocol:
self.msg.protocol_members_cant_be_final(s)
if (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs and
not self.is_stub_file and not self.is_class_scope()):
if not invalid_bare_final: # Skip extra error messages.
self.msg.final_without_value(s)
return True
def check_final_implicit_def(self, s: AssignmentStmt) -> None:
"""Do basic checks for final declaration on self in __init__.
Additional re-definition checks are performed by `analyze_lvalue`.
"""
if not s.is_final_def:
return
lval = s.lvalues[0]
assert isinstance(lval, RefExpr)
if isinstance(lval, MemberExpr):
if not self.is_self_member_ref(lval):
self.fail("Final can be only applied to a name or an attribute on self", s)
s.is_final_def = False
return
else:
assert self.function_stack
if self.function_stack[-1].name() != '__init__':
self.fail("Can only declare a final attribute in class body or __init__", s)
s.is_final_def = False
return
def store_final_status(self, s: AssignmentStmt) -> None:
"""If this is a locally valid final declaration, set the corresponding flag on `Var`."""
if s.is_final_def:
if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
node = s.lvalues[0].node
if isinstance(node, Var):
node.is_final = True
node.final_value = self.unbox_literal(s.rvalue)
if (self.is_class_scope() and
(isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
node.final_unset_in_class = True
else:
# Special case: deferred initialization of a final attribute in __init__.
# In this case we just pretend this is a valid final definition to suppress
# errors about assigning to final attribute.
for lval in self.flatten_lvalues(s.lvalues):
if isinstance(lval, MemberExpr) and self.is_self_member_ref(lval):
assert self.type, "Self member outside a class"
cur_node = self.type.names.get(lval.name, None)
if cur_node and isinstance(cur_node.node, Var) and cur_node.node.is_final:
assert self.function_stack
top_function = self.function_stack[-1]
if (top_function.name() == '__init__' and
cur_node.node.final_unset_in_class and
not cur_node.node.final_set_in_init and
not (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
cur_node.node.final_set_in_init = True
s.is_final_def = True
def flatten_lvalues(self, lvalues: List[Expression]) -> List[Expression]:
res = [] # type: List[Expression]
for lv in lvalues:
if isinstance(lv, (TupleExpr, ListExpr)):
res.extend(self.flatten_lvalues(lv.items))
else:
res.append(lv)
return res
def unbox_literal(self, e: Expression) -> Optional[Union[int, float, bool, str]]:
if isinstance(e, (IntExpr, FloatExpr, StrExpr)):
return e.value
elif isinstance(e, NameExpr) and e.name in ('True', 'False'):
            return e.name == 'True'
return None
def process_type_annotation(self, s: AssignmentStmt) -> None:
"""Analyze type annotation or infer simple literal type."""
if s.type:
lvalue = s.lvalues[-1]
allow_tuple_literal = isinstance(lvalue, TupleExpr)
analyzed = self.anal_type(s.type, allow_tuple_literal=allow_tuple_literal)
# Don't store not ready types (including placeholders).
if analyzed is None or has_placeholder(analyzed):
return
s.type = analyzed
if (self.type and self.type.is_protocol and isinstance(lvalue, NameExpr) and
isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs):
if isinstance(lvalue.node, Var):
lvalue.node.is_abstract_var = True
else:
if (any(isinstance(lv, NameExpr) and lv.is_inferred_def for lv in s.lvalues) and
self.type and self.type.is_protocol and not self.is_func_scope()):
self.fail('All protocol members must have explicitly declared types', s)
# Set the type if the rvalue is a simple literal (even if the above error occurred).
if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
if s.lvalues[0].is_inferred_def:
s.type = self.analyze_simple_literal_type(s.rvalue, s.is_final_def)
if s.type:
# Store type into nodes.
for lvalue in s.lvalues:
self.store_declared_types(lvalue, s.type)
def analyze_simple_literal_type(self, rvalue: Expression, is_final: bool) -> Optional[Type]:
"""Return builtins.int if rvalue is an int literal, etc.
If this is a 'Final' context, we return "Literal[...]" instead."""
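        # Illustrative: for "x = 5" this returns builtins.int; for
        # "x: Final = 5" it returns int narrowed with the last known value
        # Literal[5].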
if self.options.semantic_analysis_only or self.function_stack:
# Skip this if we're only doing the semantic analysis pass.
# This is mostly to avoid breaking unit tests.
# Also skip inside a function; this is to avoid confusing
# the code that handles dead code due to isinstance()
# inside type variables with value restrictions (like
# AnyStr).
return None
if isinstance(rvalue, FloatExpr):
return self.named_type_or_none('builtins.float')
value = None # type: Optional[LiteralValue]
type_name = None # type: Optional[str]
if isinstance(rvalue, IntExpr):
value, type_name = rvalue.value, 'builtins.int'
if isinstance(rvalue, StrExpr):
value, type_name = rvalue.value, 'builtins.str'
if isinstance(rvalue, BytesExpr):
value, type_name = rvalue.value, 'builtins.bytes'
if isinstance(rvalue, UnicodeExpr):
value, type_name = rvalue.value, 'builtins.unicode'
if type_name is not None:
assert value is not None
typ = self.named_type_or_none(type_name)
if typ and is_final:
return typ.copy_modified(last_known_value=LiteralType(
value=value,
fallback=typ,
line=typ.line,
column=typ.column,
))
return typ
return None
def analyze_alias(self, rvalue: Expression,
allow_placeholder: bool = False) -> Tuple[Optional[Type], List[str],
Set[str], List[str]]:
"""Check if 'rvalue' is a valid type allowed for aliasing (e.g. not a type variable).
If yes, return the corresponding type, a list of
qualified type variable names for generic aliases, a set of names the alias depends on,
and a list of type variables if the alias is generic.
        A schematic example for the dependencies:
A = int
B = str
analyze_alias(Dict[A, B])[2] == {'__main__.A', '__main__.B'}
"""
dynamic = bool(self.function_stack and self.function_stack[-1].is_dynamic())
global_scope = not self.type and not self.function_stack
res = analyze_type_alias(rvalue,
self,
self.tvar_scope,
self.plugin,
self.options,
self.is_typeshed_stub_file,
allow_unnormalized=self.is_stub_file,
allow_placeholder=allow_placeholder,
in_dynamic_func=dynamic,
global_scope=global_scope)
typ = None # type: Optional[Type]
if res:
typ, depends_on = res
found_type_vars = typ.accept(TypeVariableQuery(self.lookup_qualified, self.tvar_scope))
alias_tvars = [name for (name, node) in found_type_vars]
qualified_tvars = [node.fullname() for (name, node) in found_type_vars]
else:
alias_tvars = []
depends_on = set()
qualified_tvars = []
return typ, alias_tvars, depends_on, qualified_tvars
def check_and_set_up_type_alias(self, s: AssignmentStmt) -> bool:
"""Check if assignment creates a type alias and set it up as needed.
Return True if it is a type alias (even if the target is not ready),
or False otherwise.
Note: the resulting types for subscripted (including generic) aliases
are also stored in rvalue.analyzed.
"""
lvalue = s.lvalues[0]
if len(s.lvalues) > 1 or not isinstance(lvalue, NameExpr):
# First rule: Only simple assignments like Alias = ... create aliases.
return False
if s.unanalyzed_type is not None:
            # Second rule: An explicit type (cls: Type[A] = A) always creates a variable, not an alias.
return False
existing = self.current_symbol_table().get(lvalue.name)
# Third rule: type aliases can't be re-defined. For example:
# A: Type[float] = int
# A = float # OK, but this doesn't define an alias
# B = int
# B = float # Error!
# Don't create an alias in these cases:
if (existing
and (isinstance(existing.node, Var) # existing variable
or (isinstance(existing.node, TypeAlias)
and not s.is_alias_def) # existing alias
or (isinstance(existing.node, PlaceholderNode)
and existing.node.node.line < s.line))): # previous incomplete definition
# TODO: find a more robust way to track the order of definitions.
# Note: if is_alias_def=True, this is just a node from previous iteration.
if isinstance(existing.node, TypeAlias) and not s.is_alias_def:
self.fail('Cannot assign multiple types to name "{}"'
' without an explicit "Type[...]" annotation'
.format(lvalue.name), lvalue)
return False
non_global_scope = self.type or self.is_func_scope()
if isinstance(s.rvalue, RefExpr) and non_global_scope:
# Fourth rule (special case): Non-subscripted right hand side creates a variable
# at class and function scopes. For example:
#
# class Model:
# ...
# class C:
# model = Model # this is automatically a variable with type 'Type[Model]'
#
# without this rule, this typical use case will require a lot of explicit
# annotations (see the second rule).
return False
rvalue = s.rvalue
if not self.can_be_type_alias(rvalue):
return False
if existing and not isinstance(existing.node, (PlaceholderNode, TypeAlias)):
# Cannot redefine existing node as type alias.
return False
res = None # type: Optional[Type]
if self.is_none_alias(rvalue):
res = NoneType()
alias_tvars, depends_on, qualified_tvars = \
[], set(), [] # type: List[str], Set[str], List[str]
else:
tag = self.track_incomplete_refs()
res, alias_tvars, depends_on, qualified_tvars = \
self.analyze_alias(rvalue, allow_placeholder=True)
if not res:
return False
# TODO: Maybe we only need to reject top-level placeholders, similar
# to base classes.
if self.found_incomplete_ref(tag) or has_placeholder(res):
# Since we have got here, we know this must be a type alias (incomplete refs
# may appear in nested positions), therefore use becomes_typeinfo=True.
self.mark_incomplete(lvalue.name, rvalue, becomes_typeinfo=True)
return True
self.add_type_alias_deps(depends_on)
# In addition to the aliases used, we add deps on unbound
# type variables, since they are erased from target type.
self.add_type_alias_deps(qualified_tvars)
# The above are only direct deps on other aliases.
# For subscripted aliases, type deps from expansion are added in deps.py
# (because the type is stored).
check_for_explicit_any(res, self.options, self.is_typeshed_stub_file, self.msg,
context=s)
# When this type alias gets "inlined", the Any is not explicit anymore,
# so we need to replace it with non-explicit Anys.
res = make_any_non_explicit(res)
no_args = isinstance(res, Instance) and not res.args # type: ignore
fix_instance_types(res, self.fail)
if isinstance(s.rvalue, (IndexExpr, CallExpr)): # CallExpr is for `void = type(None)`
s.rvalue.analyzed = TypeAliasExpr(res, alias_tvars, no_args)
s.rvalue.analyzed.line = s.line
            # We use the column from the resulting target to get a better location for errors.
s.rvalue.analyzed.column = res.column
elif isinstance(s.rvalue, RefExpr):
s.rvalue.is_alias_rvalue = True
alias_node = TypeAlias(res, self.qualified_name(lvalue.name), s.line, s.column,
alias_tvars=alias_tvars, no_args=no_args)
if existing:
# An alias gets updated.
updated = False
if isinstance(existing.node, TypeAlias):
if existing.node.target != res:
                    # Copy expansion to the existing alias; this matches how we update base classes
# for a TypeInfo _in place_ if there are nested placeholders.
existing.node.target = res
existing.node.alias_tvars = alias_tvars
existing.node.no_args = no_args
updated = True
else:
# Otherwise just replace existing placeholder with type alias.
existing.node = alias_node
updated = True
if updated:
if self.final_iteration:
self.cannot_resolve_name(lvalue.name, 'name', s)
return True
else:
self.progress = True
# We need to defer so that this change can get propagated to base classes.
self.defer(s)
else:
self.add_symbol(lvalue.name, alias_node, s)
if isinstance(rvalue, RefExpr) and isinstance(rvalue.node, TypeAlias):
alias_node.normalized = rvalue.node.normalized
return True
def analyze_lvalue(self,
lval: Lvalue,
nested: bool = False,
explicit_type: bool = False,
is_final: bool = False,
escape_comprehensions: bool = False) -> None:
"""Analyze an lvalue or assignment target.
Args:
lval: The target lvalue
nested: If true, the lvalue is within a tuple or list lvalue expression
explicit_type: Assignment has type annotation
escape_comprehensions: If we are inside a comprehension, set the variable
in the enclosing scope instead. This implements
https://www.python.org/dev/peps/pep-0572/#scope-of-the-target
"""
if escape_comprehensions:
assert isinstance(lval, NameExpr), "assignment expression target must be NameExpr"
if isinstance(lval, NameExpr):
self.analyze_name_lvalue(lval, explicit_type, is_final, escape_comprehensions)
elif isinstance(lval, MemberExpr):
self.analyze_member_lvalue(lval, explicit_type, is_final)
if explicit_type and not self.is_self_member_ref(lval):
self.fail('Type cannot be declared in assignment to non-self '
'attribute', lval)
elif isinstance(lval, IndexExpr):
if explicit_type:
self.fail('Unexpected type declaration', lval)
lval.accept(self)
elif isinstance(lval, TupleExpr):
items = lval.items
            if len(items) == 0:
self.fail("can't assign to ()", lval)
self.analyze_tuple_or_list_lvalue(lval, explicit_type)
elif isinstance(lval, StarExpr):
if nested:
self.analyze_lvalue(lval.expr, nested, explicit_type)
else:
self.fail('Starred assignment target must be in a list or tuple', lval)
else:
self.fail('Invalid assignment target', lval)
def analyze_name_lvalue(self,
lvalue: NameExpr,
explicit_type: bool,
is_final: bool,
escape_comprehensions: bool) -> None:
"""Analyze an lvalue that targets a name expression.
Arguments are similar to "analyze_lvalue".
"""
if lvalue.node:
# This has been bound already in a previous iteration.
return
name = lvalue.name
if self.is_alias_for_final_name(name):
if is_final:
self.fail("Cannot redefine an existing name as final", lvalue)
else:
self.msg.cant_assign_to_final(name, self.type is not None, lvalue)
kind = self.current_symbol_kind()
names = self.current_symbol_table()
existing = names.get(name)
outer = self.is_global_or_nonlocal(name)
if (not existing or isinstance(existing.node, PlaceholderNode)) and not outer:
# Define new variable.
var = self.make_name_lvalue_var(lvalue, kind, not explicit_type)
added = self.add_symbol(name, var, lvalue, escape_comprehensions=escape_comprehensions)
# Only bind expression if we successfully added name to symbol table.
if added:
lvalue.is_new_def = True
lvalue.is_inferred_def = True
lvalue.kind = kind
lvalue.node = var
if kind == GDEF:
lvalue.fullname = var._fullname
else:
lvalue.fullname = lvalue.name
if self.is_func_scope():
if unmangle(name) == '_':
# Special case for assignment to local named '_': always infer 'Any'.
typ = AnyType(TypeOfAny.special_form)
self.store_declared_types(lvalue, typ)
if is_final and self.is_final_redefinition(kind, name):
self.fail("Cannot redefine an existing name as final", lvalue)
else:
self.make_name_lvalue_point_to_existing_def(lvalue, explicit_type, is_final)
def is_final_redefinition(self, kind: int, name: str) -> bool:
if kind == GDEF:
return self.is_mangled_global(name) and not self.is_initial_mangled_global(name)
elif kind == MDEF and self.type:
return unmangle(name) + "'" in self.type.names
return False
def is_alias_for_final_name(self, name: str) -> bool:
if self.is_func_scope():
if not name.endswith("'"):
# Not a mangled name -- can't be an alias
return False
name = unmangle(name)
assert self.locals[-1] is not None, "No locals at function scope"
existing = self.locals[-1].get(name)
return existing is not None and is_final_node(existing.node)
elif self.type is not None:
orig_name = unmangle(name) + "'"
if name == orig_name:
return False
existing = self.type.names.get(orig_name)
return existing is not None and is_final_node(existing.node)
else:
orig_name = unmangle(name) + "'"
if name == orig_name:
return False
existing = self.globals.get(orig_name)
return existing is not None and is_final_node(existing.node)
def make_name_lvalue_var(self, lvalue: NameExpr, kind: int, inferred: bool) -> Var:
"""Return a Var node for an lvalue that is a name expression."""
v = Var(lvalue.name)
v.set_line(lvalue)
v.is_inferred = inferred
if kind == MDEF:
assert self.type is not None
v.info = self.type
v.is_initialized_in_class = True
if kind != LDEF:
v._fullname = self.qualified_name(lvalue.name)
else:
            # fullname should never stay None
v._fullname = lvalue.name
v.is_ready = False # Type not inferred yet
return v
def make_name_lvalue_point_to_existing_def(
self,
lval: NameExpr,
explicit_type: bool,
is_final: bool) -> None:
"""Update an lvalue to point to existing definition in the same scope.
Arguments are similar to "analyze_lvalue".
        Assume that the name already exists in the scope.
"""
if is_final:
# Redefining an existing name with final is always an error.
self.fail("Cannot redefine an existing name as final", lval)
original_def = self.lookup(lval.name, lval, suppress_errors=True)
if original_def is None and self.type and not self.is_func_scope():
# Workaround to allow "x, x = ..." in class body.
original_def = self.type.get(lval.name)
if explicit_type:
# Don't re-bind if there is a type annotation.
self.name_already_defined(lval.name, lval, original_def)
else:
# Bind to an existing name.
if original_def:
self.bind_name_expr(lval, original_def)
else:
self.name_not_defined(lval.name, lval)
self.check_lvalue_validity(lval.node, lval)
def analyze_tuple_or_list_lvalue(self, lval: TupleExpr,
explicit_type: bool = False) -> None:
"""Analyze an lvalue or assignment target that is a list or tuple."""
items = lval.items
star_exprs = [item for item in items if isinstance(item, StarExpr)]
if len(star_exprs) > 1:
self.fail('Two starred expressions in assignment', lval)
else:
if len(star_exprs) == 1:
star_exprs[0].valid = True
for i in items:
self.analyze_lvalue(i, nested=True, explicit_type=explicit_type)
def analyze_member_lvalue(self, lval: MemberExpr, explicit_type: bool, is_final: bool) -> None:
"""Analyze lvalue that is a member expression.
Arguments:
lval: The target lvalue
explicit_type: Assignment has type annotation
is_final: Is the target final
"""
if lval.node:
# This has been bound already in a previous iteration.
return
lval.accept(self)
if self.is_self_member_ref(lval):
assert self.type, "Self member outside a class"
cur_node = self.type.names.get(lval.name)
node = self.type.get(lval.name)
if cur_node and is_final:
# Overrides will be checked in type checker.
self.fail("Cannot redefine an existing name as final", lval)
# On first encounter with this definition, if this attribute was defined before
# with an inferred type and it's marked with an explicit type now, give an error.
if (not lval.node and cur_node and isinstance(cur_node.node, Var) and
cur_node.node.is_inferred and explicit_type):
self.attribute_already_defined(lval.name, lval, cur_node)
# If the attribute of self is not defined in superclasses, create a new Var, ...
if (node is None
or (isinstance(node.node, Var) and node.node.is_abstract_var)
# ... also an explicit declaration on self also creates a new Var.
                # Note that `explicit_type` might have been erased for bare `Final`,
# so we also check if `is_final` is passed.
or (cur_node is None and (explicit_type or is_final))):
if self.type.is_protocol and node is None:
self.fail("Protocol members cannot be defined via assignment to self", lval)
else:
# Implicit attribute definition in __init__.
lval.is_new_def = True
lval.is_inferred_def = True
v = Var(lval.name)
v.set_line(lval)
v._fullname = self.qualified_name(lval.name)
v.info = self.type
v.is_ready = False
v.explicit_self_type = explicit_type or is_final
lval.def_var = v
lval.node = v
# TODO: should we also set lval.kind = MDEF?
self.type.names[lval.name] = SymbolTableNode(MDEF, v, implicit=True)
self.check_lvalue_validity(lval.node, lval)
def is_self_member_ref(self, memberexpr: MemberExpr) -> bool:
"""Does memberexpr to refer to an attribute of self?"""
if not isinstance(memberexpr.expr, NameExpr):
return False
node = memberexpr.expr.node
return isinstance(node, Var) and node.is_self
def check_lvalue_validity(self, node: Union[Expression, SymbolNode, None],
ctx: Context) -> None:
if isinstance(node, TypeVarExpr):
self.fail('Invalid assignment target', ctx)
elif isinstance(node, TypeInfo):
self.fail(message_registry.CANNOT_ASSIGN_TO_TYPE, ctx)
def store_declared_types(self, lvalue: Lvalue, typ: Type) -> None:
if isinstance(typ, StarType) and not isinstance(lvalue, StarExpr):
self.fail('Star type only allowed for starred expressions', lvalue)
if isinstance(lvalue, RefExpr):
lvalue.is_inferred_def = False
if isinstance(lvalue.node, Var):
var = lvalue.node
var.type = typ
var.is_ready = True
# If node is not a variable, we'll catch it elsewhere.
elif isinstance(lvalue, TupleExpr):
typ = get_proper_type(typ)
if isinstance(typ, TupleType):
if len(lvalue.items) != len(typ.items):
self.fail('Incompatible number of tuple items', lvalue)
return
for item, itemtype in zip(lvalue.items, typ.items):
self.store_declared_types(item, itemtype)
else:
self.fail('Tuple type expected for multiple variables',
lvalue)
elif isinstance(lvalue, StarExpr):
# Historical behavior for the old parser
if isinstance(typ, StarType):
self.store_declared_types(lvalue.expr, typ.type)
else:
self.store_declared_types(lvalue.expr, typ)
else:
# This has been flagged elsewhere as an error, so just ignore here.
pass
def process_typevar_declaration(self, s: AssignmentStmt) -> bool:
"""Check if s declares a TypeVar; it yes, store it in symbol table.
Return True if this looks like a type variable declaration (but maybe
with errors), otherwise return False.
"""
call = self.get_typevar_declaration(s)
if not call:
return False
lvalue = s.lvalues[0]
assert isinstance(lvalue, NameExpr)
if s.type:
self.fail("Cannot declare the type of a type variable", s)
return False
name = lvalue.name
if not self.check_typevar_name(call, name, s):
return False
# Constraining types
n_values = call.arg_kinds[1:].count(ARG_POS)
values = self.analyze_value_types(call.args[1:1 + n_values])
res = self.process_typevar_parameters(call.args[1 + n_values:],
call.arg_names[1 + n_values:],
call.arg_kinds[1 + n_values:],
n_values,
s)
if res is None:
return False
variance, upper_bound = res
existing = self.current_symbol_table().get(name)
if existing and not (isinstance(existing.node, PlaceholderNode) or
# Also give error for another type variable with the same name.
(isinstance(existing.node, TypeVarExpr) and
existing.node is call.analyzed)):
self.fail("Cannot redefine '%s' as a type variable" % name, s)
return False
if self.options.disallow_any_unimported:
for idx, constraint in enumerate(values, start=1):
if has_any_from_unimported_type(constraint):
prefix = "Constraint {}".format(idx)
self.msg.unimported_type_becomes_any(prefix, constraint, s)
if has_any_from_unimported_type(upper_bound):
prefix = "Upper bound of type variable"
self.msg.unimported_type_becomes_any(prefix, upper_bound, s)
for t in values + [upper_bound]:
check_for_explicit_any(t, self.options, self.is_typeshed_stub_file, self.msg,
context=s)
# mypyc suppresses making copies of a function to check each
# possible type, so set the upper bound to Any to prevent that
# from causing errors.
if values and self.options.mypyc:
upper_bound = AnyType(TypeOfAny.implementation_artifact)
# Yes, it's a valid type variable definition! Add it to the symbol table.
if not call.analyzed:
type_var = TypeVarExpr(name, self.qualified_name(name),
values, upper_bound, variance)
type_var.line = call.line
call.analyzed = type_var
else:
assert isinstance(call.analyzed, TypeVarExpr)
if call.analyzed.values != values or call.analyzed.upper_bound != upper_bound:
self.progress = True
call.analyzed.upper_bound = upper_bound
call.analyzed.values = values
self.add_symbol(name, call.analyzed, s)
return True
def check_typevar_name(self, call: CallExpr, name: str, context: Context) -> bool:
name = unmangle(name)
if len(call.args) < 1:
self.fail("Too few arguments for TypeVar()", context)
return False
if (not isinstance(call.args[0], (StrExpr, BytesExpr, UnicodeExpr))
or not call.arg_kinds[0] == ARG_POS):
self.fail("TypeVar() expects a string literal as first argument", context)
return False
elif call.args[0].value != name:
msg = "String argument 1 '{}' to TypeVar(...) does not match variable name '{}'"
self.fail(msg.format(call.args[0].value, name), context)
return False
return True
def get_typevar_declaration(self, s: AssignmentStmt) -> Optional[CallExpr]:
"""Returns the TypeVar() call expression if `s` is a type var declaration
or None otherwise.
"""
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return None
if not isinstance(s.rvalue, CallExpr):
return None
call = s.rvalue
callee = call.callee
if not isinstance(callee, RefExpr):
return None
if callee.fullname != 'typing.TypeVar':
return None
return call
def process_typevar_parameters(self, args: List[Expression],
names: List[Optional[str]],
kinds: List[int],
num_values: int,
context: Context) -> Optional[Tuple[int, Type]]:
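        # Illustrative: for TypeVar('T', int, str, covariant=True) this method
        # receives only the named arguments (covariant=True); the positional
        # value restrictions were already consumed by the caller, so
        # num_values == 2 here.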
has_values = (num_values > 0)
covariant = False
contravariant = False
upper_bound = self.object_type() # type: Type
for param_value, param_name, param_kind in zip(args, names, kinds):
if not param_kind == ARG_NAMED:
self.fail("Unexpected argument to TypeVar()", context)
return None
if param_name == 'covariant':
if isinstance(param_value, NameExpr):
if param_value.name == 'True':
covariant = True
else:
self.fail("TypeVar 'covariant' may only be 'True'", context)
return None
else:
self.fail("TypeVar 'covariant' may only be 'True'", context)
return None
elif param_name == 'contravariant':
if isinstance(param_value, NameExpr):
if param_value.name == 'True':
contravariant = True
else:
self.fail("TypeVar 'contravariant' may only be 'True'", context)
return None
else:
self.fail("TypeVar 'contravariant' may only be 'True'", context)
return None
elif param_name == 'bound':
if has_values:
self.fail("TypeVar cannot have both values and an upper bound", context)
return None
try:
# We want to use our custom error message below, so we suppress
# the default error message for invalid types here.
analyzed = self.expr_to_analyzed_type(param_value,
allow_placeholder=True,
report_invalid_types=False)
if analyzed is None:
# Type variables are special: we need to place them in the symbol table
# soon, even if upper bound is not ready yet. Otherwise avoiding
# a "deadlock" in this common pattern would be tricky:
# T = TypeVar('T', bound=Custom[Any])
# class Custom(Generic[T]):
# ...
analyzed = PlaceholderType(None, [], context.line)
upper_bound = get_proper_type(analyzed)
if isinstance(upper_bound, AnyType) and upper_bound.is_from_error:
self.fail("TypeVar 'bound' must be a type", param_value)
# Note: we do not return 'None' here -- we want to continue
# using the AnyType as the upper bound.
except TypeTranslationError:
self.fail("TypeVar 'bound' must be a type", param_value)
return None
elif param_name == 'values':
# Probably using obsolete syntax with values=(...). Explain the current syntax.
self.fail("TypeVar 'values' argument not supported", context)
self.fail("Use TypeVar('T', t, ...) instead of TypeVar('T', values=(t, ...))",
context)
return None
else:
self.fail("Unexpected argument to TypeVar(): {}".format(param_name), context)
return None
if covariant and contravariant:
self.fail("TypeVar cannot be both covariant and contravariant", context)
return None
elif num_values == 1:
self.fail("TypeVar cannot have only a single constraint", context)
return None
elif covariant:
variance = COVARIANT
elif contravariant:
variance = CONTRAVARIANT
else:
variance = INVARIANT
return variance, upper_bound
def basic_new_typeinfo(self, name: str, basetype_or_fallback: Instance) -> TypeInfo:
class_def = ClassDef(name, Block([]))
if self.is_func_scope() and not self.type:
# Full names of generated classes should always be prefixed with the module names
# even if they are nested in a function, since these classes will be (de-)serialized.
# (Note that the caller should append @line to the name to avoid collisions.)
# TODO: clean this up, see #6422.
class_def.fullname = self.cur_mod_id + '.' + self.qualified_name(name)
else:
class_def.fullname = self.qualified_name(name)
info = TypeInfo(SymbolTable(), class_def, self.cur_mod_id)
class_def.info = info
mro = basetype_or_fallback.type.mro
if not mro:
# Forward reference, MRO should be recalculated in third pass.
mro = [basetype_or_fallback.type, self.object_type().type]
info.mro = [info] + mro
info.bases = [basetype_or_fallback]
return info
def analyze_value_types(self, items: List[Expression]) -> List[Type]:
"""Analyze types from values expressions in type variable definition."""
result = [] # type: List[Type]
for node in items:
try:
analyzed = self.anal_type(expr_to_unanalyzed_type(node),
allow_placeholder=True)
if analyzed is None:
# Type variables are special: we need to place them in the symbol table
# soon, even if some value is not ready yet, see process_typevar_parameters()
# for an example.
analyzed = PlaceholderType(None, [], node.line)
result.append(analyzed)
except TypeTranslationError:
self.fail('Type expected', node)
result.append(AnyType(TypeOfAny.from_error))
return result
def check_classvar(self, s: AssignmentStmt) -> None:
"""Check if assignment defines a class variable."""
lvalue = s.lvalues[0]
if len(s.lvalues) != 1 or not isinstance(lvalue, RefExpr):
return
if not s.type or not self.is_classvar(s.type):
return
if self.is_class_scope() and isinstance(lvalue, NameExpr):
node = lvalue.node
if isinstance(node, Var):
node.is_classvar = True
elif not isinstance(lvalue, MemberExpr) or self.is_self_member_ref(lvalue):
# In case of member access, report error only when assigning to self
# Other kinds of member assignments should be already reported
self.fail_invalid_classvar(lvalue)
def is_classvar(self, typ: Type) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname() == 'typing.ClassVar'
def is_final_type(self, typ: Optional[Type]) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname() in ('typing.Final',
'typing_extensions.Final')
def fail_invalid_classvar(self, context: Context) -> None:
self.fail('ClassVar can only be used for assignments in class body', context)
def process_module_assignment(self, lvals: List[Lvalue], rval: Expression,
ctx: AssignmentStmt) -> None:
"""Propagate module references across assignments.
Recursively handles the simple form of iterable unpacking; doesn't
handle advanced unpacking with *rest, dictionary unpacking, etc.
In an expression like x = y = z, z is the rval and lvals will be [x,
y].
"""
if (isinstance(rval, (TupleExpr, ListExpr))
and all(isinstance(v, TupleExpr) for v in lvals)):
# rval and all lvals are either list or tuple, so we are dealing
# with unpacking assignment like `x, y = a, b`. Mypy didn't
# understand our all(isinstance(...)), so cast them as TupleExpr
# so mypy knows it is safe to access their .items attribute.
seq_lvals = cast(List[TupleExpr], lvals)
# given an assignment like:
# (x, y) = (m, n) = (a, b)
# we now have:
# seq_lvals = [(x, y), (m, n)]
# seq_rval = (a, b)
# We now zip this into:
# elementwise_assignments = [(a, x, m), (b, y, n)]
# where each elementwise assignment includes one element of rval and the
# corresponding element of each lval. Basically we unpack
# (x, y) = (m, n) = (a, b)
# into elementwise assignments
# x = m = a
# y = n = b
# and then we recursively call this method for each of those assignments.
# If the rval and all lvals are not all of the same length, zip will just ignore
# extra elements, so no error will be raised here; mypy will later complain
# about the length mismatch in type-checking.
elementwise_assignments = zip(rval.items, *[v.items for v in seq_lvals])
for rv, *lvs in elementwise_assignments:
self.process_module_assignment(lvs, rv, ctx)
elif isinstance(rval, RefExpr):
rnode = self.lookup_type_node(rval)
if rnode and isinstance(rnode.node, MypyFile):
for lval in lvals:
if not isinstance(lval, NameExpr):
continue
# respect explicitly annotated type
if (isinstance(lval.node, Var) and lval.node.type is not None):
continue
lnode = self.current_symbol_table().get(lval.name)
if lnode:
if isinstance(lnode.node, MypyFile) and lnode.node is not rnode.node:
self.fail(
"Cannot assign multiple modules to name '{}' "
"without explicit 'types.ModuleType' annotation".format(lval.name),
ctx)
# never create module alias except on initial var definition
elif lval.is_inferred_def:
lnode.kind = self.current_symbol_kind()
assert rnode.node is not None
lnode.node = rnode.node
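    # Example (illustrative sketch, not in the original source): given module
    # objects os and sys, the chained unpacking assignment
    #
    #     (x, y) = (m, n) = (os, sys)
    #
    # is decomposed above into the elementwise assignments x = m = os and
    # y = n = sys, so all four names become module aliases.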
def process__all__(self, s: AssignmentStmt) -> None:
"""Export names if argument is a __all__ assignment."""
if (len(s.lvalues) == 1 and isinstance(s.lvalues[0], NameExpr) and
s.lvalues[0].name == '__all__' and s.lvalues[0].kind == GDEF and
isinstance(s.rvalue, (ListExpr, TupleExpr))):
self.add_exports(s.rvalue.items)
#
# Misc statements
#
def visit_block(self, b: Block) -> None:
if b.is_unreachable:
return
self.block_depth[-1] += 1
for s in b.body:
self.accept(s)
self.block_depth[-1] -= 1
def visit_block_maybe(self, b: Optional[Block]) -> None:
if b:
self.visit_block(b)
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
self.statement = s
s.expr.accept(self)
def visit_return_stmt(self, s: ReturnStmt) -> None:
self.statement = s
if not self.is_func_scope():
self.fail("'return' outside function", s)
if s.expr:
s.expr.accept(self)
def visit_raise_stmt(self, s: RaiseStmt) -> None:
self.statement = s
if s.expr:
s.expr.accept(self)
if s.from_expr:
s.from_expr.accept(self)
def visit_assert_stmt(self, s: AssertStmt) -> None:
self.statement = s
if s.expr:
s.expr.accept(self)
if s.msg:
s.msg.accept(self)
def visit_operator_assignment_stmt(self,
s: OperatorAssignmentStmt) -> None:
self.statement = s
s.lvalue.accept(self)
s.rvalue.accept(self)
if (isinstance(s.lvalue, NameExpr) and s.lvalue.name == '__all__' and
s.lvalue.kind == GDEF and isinstance(s.rvalue, (ListExpr, TupleExpr))):
self.add_exports(s.rvalue.items)
def visit_while_stmt(self, s: WhileStmt) -> None:
self.statement = s
s.expr.accept(self)
self.loop_depth += 1
s.body.accept(self)
self.loop_depth -= 1
self.visit_block_maybe(s.else_body)
def visit_for_stmt(self, s: ForStmt) -> None:
self.statement = s
s.expr.accept(self)
# Bind index variables and check if they define new names.
self.analyze_lvalue(s.index, explicit_type=s.index_type is not None)
if s.index_type:
if self.is_classvar(s.index_type):
self.fail_invalid_classvar(s.index)
allow_tuple_literal = isinstance(s.index, TupleExpr)
analyzed = self.anal_type(s.index_type, allow_tuple_literal=allow_tuple_literal)
if analyzed is not None:
self.store_declared_types(s.index, analyzed)
s.index_type = analyzed
self.loop_depth += 1
self.visit_block(s.body)
self.loop_depth -= 1
self.visit_block_maybe(s.else_body)
def visit_break_stmt(self, s: BreakStmt) -> None:
self.statement = s
if self.loop_depth == 0:
self.fail("'break' outside loop", s, serious=True, blocker=True)
def visit_continue_stmt(self, s: ContinueStmt) -> None:
self.statement = s
if self.loop_depth == 0:
self.fail("'continue' outside loop", s, serious=True, blocker=True)
def visit_if_stmt(self, s: IfStmt) -> None:
self.statement = s
infer_reachability_of_if_statement(s, self.options)
for i in range(len(s.expr)):
s.expr[i].accept(self)
self.visit_block(s.body[i])
self.visit_block_maybe(s.else_body)
def visit_try_stmt(self, s: TryStmt) -> None:
self.statement = s
self.analyze_try_stmt(s, self)
def analyze_try_stmt(self, s: TryStmt, visitor: NodeVisitor[None]) -> None:
s.body.accept(visitor)
for type, var, handler in zip(s.types, s.vars, s.handlers):
if type:
type.accept(visitor)
if var:
self.analyze_lvalue(var)
handler.accept(visitor)
if s.else_body:
s.else_body.accept(visitor)
if s.finally_body:
s.finally_body.accept(visitor)
def visit_with_stmt(self, s: WithStmt) -> None:
self.statement = s
types = [] # type: List[Type]
if s.unanalyzed_type:
assert isinstance(s.unanalyzed_type, ProperType)
actual_targets = [t for t in s.target if t is not None]
if len(actual_targets) == 0:
# We have a type for no targets
self.fail('Invalid type comment: "with" statement has no targets', s)
elif len(actual_targets) == 1:
# We have one target and one type
types = [s.unanalyzed_type]
elif isinstance(s.unanalyzed_type, TupleType):
# We have multiple targets and multiple types
if len(actual_targets) == len(s.unanalyzed_type.items):
types = s.unanalyzed_type.items.copy()
else:
# But it's the wrong number of items
self.fail('Incompatible number of types for "with" targets', s)
else:
# We have multiple targets and one type
self.fail('Multiple types expected for multiple "with" targets', s)
new_types = [] # type: List[Type]
for e, n in zip(s.expr, s.target):
e.accept(self)
if n:
self.analyze_lvalue(n, explicit_type=s.unanalyzed_type is not None)
# Since we have a target, pop the next type from types
if types:
t = types.pop(0)
if self.is_classvar(t):
self.fail_invalid_classvar(n)
allow_tuple_literal = isinstance(n, TupleExpr)
analyzed = self.anal_type(t, allow_tuple_literal=allow_tuple_literal)
if analyzed is not None:
# TODO: Deal with this better
new_types.append(analyzed)
self.store_declared_types(n, analyzed)
s.analyzed_types = new_types
self.visit_block(s.body)
def visit_del_stmt(self, s: DelStmt) -> None:
self.statement = s
s.expr.accept(self)
if not self.is_valid_del_target(s.expr):
self.fail('Invalid delete target', s)
def is_valid_del_target(self, s: Expression) -> bool:
if isinstance(s, (IndexExpr, NameExpr, MemberExpr)):
return True
elif isinstance(s, (TupleExpr, ListExpr)):
return all(self.is_valid_del_target(item) for item in s.items)
else:
return False
def visit_global_decl(self, g: GlobalDecl) -> None:
self.statement = g
for name in g.names:
if name in self.nonlocal_decls[-1]:
self.fail("Name '{}' is nonlocal and global".format(name), g)
self.global_decls[-1].add(name)
def visit_nonlocal_decl(self, d: NonlocalDecl) -> None:
self.statement = d
if not self.is_func_scope():
self.fail("nonlocal declaration not allowed at module level", d)
else:
for name in d.names:
for table in reversed(self.locals[:-1]):
if table is not None and name in table:
break
else:
self.fail("No binding for nonlocal '{}' found".format(name), d)
if self.locals[-1] is not None and name in self.locals[-1]:
self.fail("Name '{}' is already defined in local "
"scope before nonlocal declaration".format(name), d)
if name in self.global_decls[-1]:
self.fail("Name '{}' is nonlocal and global".format(name), d)
self.nonlocal_decls[-1].add(name)
def visit_print_stmt(self, s: PrintStmt) -> None:
self.statement = s
for arg in s.args:
arg.accept(self)
if s.target:
s.target.accept(self)
def visit_exec_stmt(self, s: ExecStmt) -> None:
self.statement = s
s.expr.accept(self)
if s.globals:
s.globals.accept(self)
if s.locals:
s.locals.accept(self)
#
# Expressions
#
def visit_name_expr(self, expr: NameExpr) -> None:
n = self.lookup(expr.name, expr)
if n:
self.bind_name_expr(expr, n)
def bind_name_expr(self, expr: NameExpr, sym: SymbolTableNode) -> None:
"""Bind name expression to a symbol table node."""
if isinstance(sym.node, TypeVarExpr) and self.tvar_scope.get_binding(sym):
self.fail("'{}' is a type variable and only valid in type "
"context".format(expr.name), expr)
elif isinstance(sym.node, PlaceholderNode):
self.process_placeholder(expr.name, 'name', expr)
else:
expr.kind = sym.kind
expr.node = sym.node
expr.fullname = sym.fullname
def visit_super_expr(self, expr: SuperExpr) -> None:
if not self.type and not expr.call.args:
self.fail('"super" used outside class', expr)
return
expr.info = self.type
for arg in expr.call.args:
arg.accept(self)
def visit_tuple_expr(self, expr: TupleExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_list_expr(self, expr: ListExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_set_expr(self, expr: SetExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_dict_expr(self, expr: DictExpr) -> None:
for key, value in expr.items:
if key is not None:
key.accept(self)
value.accept(self)
def visit_star_expr(self, expr: StarExpr) -> None:
if not expr.valid:
# XXX TODO Change this error message
self.fail('Can use starred expression only as assignment target', expr)
else:
expr.expr.accept(self)
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
if not self.is_func_scope(): # not sure
self.fail("'yield from' outside function", e, serious=True, blocker=True)
else:
if self.function_stack[-1].is_coroutine:
self.fail("'yield from' in async function", e, serious=True, blocker=True)
else:
self.function_stack[-1].is_generator = True
if e.expr:
e.expr.accept(self)
def visit_call_expr(self, expr: CallExpr) -> None:
"""Analyze a call expression.
Some call expressions are recognized as special forms, including
cast(...).
"""
expr.callee.accept(self)
if refers_to_fullname(expr.callee, 'typing.cast'):
# Special form cast(...).
if not self.check_fixed_args(expr, 2, 'cast'):
return
# Translate first argument to an unanalyzed type.
try:
target = expr_to_unanalyzed_type(expr.args[0])
except TypeTranslationError:
self.fail('Cast target is not a type', expr)
return
# Piggyback CastExpr object to the CallExpr object; it takes
# precedence over the CallExpr semantics.
expr.analyzed = CastExpr(expr.args[1], target)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, 'builtins.reveal_type'):
if not self.check_fixed_args(expr, 1, 'reveal_type'):
return
expr.analyzed = RevealExpr(kind=REVEAL_TYPE, expr=expr.args[0])
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, 'builtins.reveal_locals'):
# Store the local variable names into the RevealExpr for use in the
# type checking pass
local_nodes = [] # type: List[Var]
if self.is_module_scope():
                # Try to determine just the variable declarations in module scope.
                # self.globals.values() contains SymbolTableNode's; look for the
                # nodes whose .node attribute is a nodes.Var marked as is_inferred.
local_nodes = [n.node
for name, n in self.globals.items()
if getattr(n.node, 'is_inferred', False)
and isinstance(n.node, Var)]
elif self.is_class_scope():
# type = None # type: Optional[TypeInfo]
if self.type is not None:
local_nodes = [st.node
for st in self.type.names.values()
if isinstance(st.node, Var)]
elif self.is_func_scope():
# locals = None # type: List[Optional[SymbolTable]]
if self.locals is not None:
symbol_table = self.locals[-1]
if symbol_table is not None:
local_nodes = [st.node
for st in symbol_table.values()
if isinstance(st.node, Var)]
expr.analyzed = RevealExpr(kind=REVEAL_LOCALS, local_nodes=local_nodes)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, 'typing.Any'):
# Special form Any(...) no longer supported.
self.fail('Any(...) is no longer supported. Use cast(Any, ...) instead', expr)
elif refers_to_fullname(expr.callee, 'typing._promote'):
# Special form _promote(...).
if not self.check_fixed_args(expr, 1, '_promote'):
return
# Translate first argument to an unanalyzed type.
try:
target = expr_to_unanalyzed_type(expr.args[0])
except TypeTranslationError:
self.fail('Argument 1 to _promote is not a type', expr)
return
expr.analyzed = PromoteExpr(target)
expr.analyzed.line = expr.line
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, 'builtins.dict'):
expr.analyzed = self.translate_dict_call(expr)
elif refers_to_fullname(expr.callee, 'builtins.divmod'):
if not self.check_fixed_args(expr, 2, 'divmod'):
return
expr.analyzed = OpExpr('divmod', expr.args[0], expr.args[1])
expr.analyzed.line = expr.line
expr.analyzed.accept(self)
else:
# Normal call expression.
for a in expr.args:
a.accept(self)
if (isinstance(expr.callee, MemberExpr) and
isinstance(expr.callee.expr, NameExpr) and
expr.callee.expr.name == '__all__' and
expr.callee.expr.kind == GDEF and
expr.callee.name in ('append', 'extend')):
if expr.callee.name == 'append' and expr.args:
self.add_exports(expr.args[0])
elif (expr.callee.name == 'extend' and expr.args and
isinstance(expr.args[0], (ListExpr, TupleExpr))):
self.add_exports(expr.args[0].items)
def translate_dict_call(self, call: CallExpr) -> Optional[DictExpr]:
"""Translate 'dict(x=y, ...)' to {'x': y, ...}.
For other variants of dict(...), return None.
"""
if not call.args:
return None
if not all(kind == ARG_NAMED for kind in call.arg_kinds):
# Must still accept those args.
for a in call.args:
a.accept(self)
return None
expr = DictExpr([(StrExpr(cast(str, key)), value) # since they are all ARG_NAMED
for key, value in zip(call.arg_names, call.args)])
expr.set_line(call)
expr.accept(self)
return expr
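    # Example (illustrative sketch, not in the original source):
    # translate_dict_call() rewrites
    #
    #     dict(x=1, y=2)   ->   {'x': 1, 'y': 2}
    #
    # while calls such as dict(), dict(pairs) or dict(**kwargs) are left
    # alone (None is returned for them).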
def check_fixed_args(self, expr: CallExpr, numargs: int,
name: str) -> bool:
"""Verify that expr has specified number of positional args.
Return True if the arguments are valid.
"""
s = 's'
if numargs == 1:
s = ''
if len(expr.args) != numargs:
self.fail("'%s' expects %d argument%s" % (name, numargs, s),
expr)
return False
if expr.arg_kinds != [ARG_POS] * numargs:
self.fail("'%s' must be called with %s positional argument%s" %
(name, numargs, s), expr)
return False
return True
def visit_member_expr(self, expr: MemberExpr) -> None:
base = expr.expr
base.accept(self)
if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
# Handle module attribute.
sym = self.get_module_symbol(base.node, expr.name)
if sym:
if isinstance(sym.node, PlaceholderNode):
self.process_placeholder(expr.name, 'attribute', expr)
return
expr.kind = sym.kind
expr.fullname = sym.fullname
expr.node = sym.node
elif isinstance(base, RefExpr):
# This branch handles the case C.bar (or cls.bar or self.bar inside
# a classmethod/method), where C is a class and bar is a type
# definition or a module resulting from `import bar` (or a module
# assignment) inside class C. We look up bar in the class' TypeInfo
# namespace. This is done only when bar is a module or a type;
# other things (e.g. methods) are handled by other code in
# checkmember.
type_info = None
if isinstance(base.node, TypeInfo):
# C.bar where C is a class
type_info = base.node
elif isinstance(base.node, Var) and self.type and self.function_stack:
# check for self.bar or cls.bar in method/classmethod
func_def = self.function_stack[-1]
if not func_def.is_static and isinstance(func_def.type, CallableType):
formal_arg = func_def.type.argument_by_name(base.node.name())
if formal_arg and formal_arg.pos == 0:
type_info = self.type
elif isinstance(base.node, TypeAlias) and base.node.no_args:
assert isinstance(base.node.target, ProperType)
# TODO: support chained aliases.
if isinstance(base.node.target, Instance):
type_info = base.node.target.type
if type_info:
n = type_info.names.get(expr.name)
if n is not None and isinstance(n.node, (MypyFile, TypeInfo, TypeAlias)):
expr.kind = n.kind
expr.fullname = n.fullname
expr.node = n.node
def visit_op_expr(self, expr: OpExpr) -> None:
expr.left.accept(self)
if expr.op in ('and', 'or'):
inferred = infer_condition_value(expr.left, self.options)
if ((inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'and') or
(inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'or')):
expr.right_unreachable = True
return
elif ((inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'and') or
(inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'or')):
expr.right_always = True
expr.right.accept(self)
def visit_comparison_expr(self, expr: ComparisonExpr) -> None:
for operand in expr.operands:
operand.accept(self)
def visit_unary_expr(self, expr: UnaryExpr) -> None:
expr.expr.accept(self)
def visit_index_expr(self, expr: IndexExpr) -> None:
base = expr.base
base.accept(self)
if (isinstance(base, RefExpr)
and isinstance(base.node, TypeInfo)
and not base.node.is_generic()):
expr.index.accept(self)
elif ((isinstance(base, RefExpr) and isinstance(base.node, TypeAlias))
or refers_to_class_or_function(base)):
# We need to do full processing on every iteration, since some type
# arguments may contain placeholder types.
self.analyze_type_application(expr)
else:
expr.index.accept(self)
def analyze_type_application(self, expr: IndexExpr) -> None:
"""Analyze special form -- type application (either direct or via type aliasing)."""
types = self.analyze_type_application_args(expr)
if types is None:
return
base = expr.base
expr.analyzed = TypeApplication(base, types)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
        # The types list, dict and set are not subscriptable; prohibit
        # subscripting them either via a type alias...
if isinstance(base, RefExpr) and isinstance(base.node, TypeAlias):
alias = base.node
target = get_proper_type(alias.target)
if isinstance(target, Instance):
name = target.type.fullname()
if (alias.no_args and # this avoids bogus errors for already reported aliases
name in nongen_builtins and not alias.normalized):
self.fail(no_subscript_builtin_alias(name, propose_alt=False), expr)
# ...or directly.
else:
n = self.lookup_type_node(base)
if n and n.fullname in nongen_builtins:
self.fail(no_subscript_builtin_alias(n.fullname, propose_alt=False), expr)
def analyze_type_application_args(self, expr: IndexExpr) -> Optional[List[Type]]:
"""Analyze type arguments (index) in a type application.
Return None if anything was incomplete.
"""
index = expr.index
tag = self.track_incomplete_refs()
self.analyze_type_expr(index)
if self.found_incomplete_ref(tag):
return None
types = [] # type: List[Type]
if isinstance(index, TupleExpr):
items = index.items
else:
items = [index]
for item in items:
try:
typearg = expr_to_unanalyzed_type(item)
except TypeTranslationError:
self.fail('Type expected within [...]', expr)
return None
# We always allow unbound type variables in IndexExpr, since we
# may be analysing a type alias definition rvalue. The error will be
# reported elsewhere if it is not the case.
analyzed = self.anal_type(typearg, allow_unbound_tvars=True,
allow_placeholder=True)
if analyzed is None:
return None
types.append(analyzed)
return types
def visit_slice_expr(self, expr: SliceExpr) -> None:
if expr.begin_index:
expr.begin_index.accept(self)
if expr.end_index:
expr.end_index.accept(self)
if expr.stride:
expr.stride.accept(self)
def visit_cast_expr(self, expr: CastExpr) -> None:
expr.expr.accept(self)
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
def visit_reveal_expr(self, expr: RevealExpr) -> None:
if expr.kind == REVEAL_TYPE:
if expr.expr is not None:
expr.expr.accept(self)
else:
# Reveal locals doesn't have an inner expression, there's no
# need to traverse inside it
pass
def visit_type_application(self, expr: TypeApplication) -> None:
expr.expr.accept(self)
for i in range(len(expr.types)):
analyzed = self.anal_type(expr.types[i])
if analyzed is not None:
expr.types[i] = analyzed
def visit_list_comprehension(self, expr: ListComprehension) -> None:
expr.generator.accept(self)
def visit_set_comprehension(self, expr: SetComprehension) -> None:
expr.generator.accept(self)
def visit_dictionary_comprehension(self, expr: DictionaryComprehension) -> None:
self.enter(expr)
self.analyze_comp_for(expr)
expr.key.accept(self)
expr.value.accept(self)
self.leave()
self.analyze_comp_for_2(expr)
def visit_generator_expr(self, expr: GeneratorExpr) -> None:
self.enter(expr)
self.analyze_comp_for(expr)
expr.left_expr.accept(self)
self.leave()
self.analyze_comp_for_2(expr)
def analyze_comp_for(self, expr: Union[GeneratorExpr,
DictionaryComprehension]) -> None:
"""Analyses the 'comp_for' part of comprehensions (part 1).
        That is the part after 'for' in (x for x in l if p). This analyzes
        the index variables and conditions, which are bound in a local scope.
"""
for i, (index, sequence, conditions) in enumerate(zip(expr.indices,
expr.sequences,
expr.condlists)):
if i > 0:
sequence.accept(self)
# Bind index variables.
self.analyze_lvalue(index)
for cond in conditions:
cond.accept(self)
def analyze_comp_for_2(self, expr: Union[GeneratorExpr,
DictionaryComprehension]) -> None:
"""Analyses the 'comp_for' part of comprehensions (part 2).
        That is the part after 'for' in (x for x in l if p). This analyzes
        the 'l' part, which belongs to the surrounding scope.
"""
expr.sequences[0].accept(self)
def visit_lambda_expr(self, expr: LambdaExpr) -> None:
self.analyze_arg_initializers(expr)
self.analyze_function_body(expr)
def visit_conditional_expr(self, expr: ConditionalExpr) -> None:
expr.if_expr.accept(self)
expr.cond.accept(self)
expr.else_expr.accept(self)
def visit_backquote_expr(self, expr: BackquoteExpr) -> None:
expr.expr.accept(self)
def visit__promote_expr(self, expr: PromoteExpr) -> None:
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
def visit_yield_expr(self, expr: YieldExpr) -> None:
if not self.is_func_scope():
self.fail("'yield' outside function", expr, serious=True, blocker=True)
else:
if self.function_stack[-1].is_coroutine:
if self.options.python_version < (3, 6):
self.fail("'yield' in async function", expr, serious=True, blocker=True)
else:
self.function_stack[-1].is_generator = True
self.function_stack[-1].is_async_generator = True
else:
self.function_stack[-1].is_generator = True
if expr.expr:
expr.expr.accept(self)
def visit_await_expr(self, expr: AwaitExpr) -> None:
if not self.is_func_scope():
self.fail("'await' outside function", expr)
elif not self.function_stack[-1].is_coroutine:
self.fail("'await' outside coroutine ('async def')", expr)
expr.expr.accept(self)
#
# Lookup functions
#
def lookup(self, name: str, ctx: Context,
suppress_errors: bool = False) -> Optional[SymbolTableNode]:
"""Look up an unqualified (no dots) name in all active namespaces.
Note that the result may contain a PlaceholderNode. The caller may
want to defer in that case.
Generate an error if the name is not defined unless suppress_errors
is true or the current namespace is incomplete. In the latter case
defer.
"""
implicit_name = False
# 1a. Name declared using 'global x' takes precedence
if name in self.global_decls[-1]:
if name in self.globals:
return self.globals[name]
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
# 1b. Name declared using 'nonlocal x' takes precedence
if name in self.nonlocal_decls[-1]:
for table in reversed(self.locals[:-1]):
if table is not None and name in table:
return table[name]
else:
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
# 2. Class attributes (if within class definition)
if self.type and not self.is_func_scope() and name in self.type.names:
node = self.type.names[name]
if not node.implicit:
if self.is_active_symbol_in_class_body(node.node):
return node
else:
# Defined through self.x assignment
implicit_name = True
implicit_node = node
# 3. Local (function) scopes
for table in reversed(self.locals):
if table is not None and name in table:
return table[name]
# 4. Current file global scope
if name in self.globals:
return self.globals[name]
# 5. Builtins
b = self.globals.get('__builtins__', None)
if b:
assert isinstance(b.node, MypyFile)
table = b.node.names
if name in table:
if name[0] == "_" and name[1] != "_":
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
node = table[name]
return node
# Give up.
if not implicit_name and not suppress_errors:
self.name_not_defined(name, ctx)
        elif implicit_name:
            return implicit_node
return None
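    # Example (illustrative sketch, not in the original source) of the
    # precedence above, for a name looked up inside a function body of
    # module m:
    #
    #     x = 0              # step 4: module globals of m
    #     def f() -> None:
    #         x = 1          # step 3: local scopes, innermost first
    #         print(x)       # resolves to the local x; a 'global x'
    #                        # declaration (step 1a) would override this,
    #                        # and builtins (step 5) are the final fallback
    #
    # Step 2 (class attributes) applies only at class scope, and only to
    # names defined textually earlier in the class body.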
def is_active_symbol_in_class_body(self, node: Optional[SymbolNode]) -> bool:
"""Can a symbol defined in class body accessed at current statement?
Only allow access to class attributes textually after
the definition, so that it's possible to fall back to the
outer scope. Example:
class X: ...
class C:
X = X # Initializer refers to outer scope
Nested classes are an exception, since we want to support
arbitrary forward references in type annotations.
"""
# TODO: Forward reference to name imported in class body is not
# caught.
assert self.statement # we are at class scope
return (node is None
or node.line < self.statement.line
or not self.is_defined_in_current_module(node.fullname())
or isinstance(node, TypeInfo)
or (isinstance(node, PlaceholderNode) and node.becomes_typeinfo))
def is_defined_in_current_module(self, fullname: Optional[str]) -> bool:
if fullname is None:
return False
return module_prefix(self.modules, fullname) == self.cur_mod_id
def lookup_qualified(self, name: str, ctx: Context,
suppress_errors: bool = False) -> Optional[SymbolTableNode]:
"""Lookup a qualified name in all activate namespaces.
Note that the result may contain a PlaceholderNode. The caller may
want to defer in that case.
Generate an error if the name is not defined unless suppress_errors
is true or the current namespace is incomplete. In the latter case
defer.
"""
if '.' not in name:
# Simple case: look up a short name.
return self.lookup(name, ctx, suppress_errors=suppress_errors)
parts = name.split('.')
namespace = self.cur_mod_id
sym = self.lookup(parts[0], ctx, suppress_errors=suppress_errors)
if sym:
for i in range(1, len(parts)):
node = sym.node
part = parts[i]
if isinstance(node, TypeInfo):
nextsym = node.get(part)
elif isinstance(node, MypyFile):
nextsym = self.get_module_symbol(node, part)
namespace = node.fullname()
elif isinstance(node, PlaceholderNode):
return sym
else:
if isinstance(node, Var):
typ = get_proper_type(node.type)
if isinstance(typ, AnyType):
# Allow access through Var with Any type without error.
return self.implicit_symbol(sym, name, parts[i:], typ)
# Lookup through invalid node, such as variable or function
nextsym = None
if not nextsym or nextsym.module_hidden:
if not suppress_errors:
self.name_not_defined(name, ctx, namespace=namespace)
return None
sym = nextsym
return sym
def lookup_type_node(self, expr: Expression) -> Optional[SymbolTableNode]:
try:
t = expr_to_unanalyzed_type(expr)
except TypeTranslationError:
return None
if isinstance(t, UnboundType):
n = self.lookup_qualified(t.name, expr, suppress_errors=True)
return n
return None
def get_module_symbol(self, node: MypyFile, name: str) -> Optional[SymbolTableNode]:
"""Look up a symbol from a module.
Return None if no matching symbol could be bound.
"""
module = node.fullname()
names = node.names
sym = names.get(name)
if not sym:
fullname = module + '.' + name
if fullname in self.modules:
sym = SymbolTableNode(GDEF, self.modules[fullname])
elif self.is_incomplete_namespace(module):
self.record_incomplete_ref()
elif ('__getattr__' in names
and (node.is_stub
or self.options.python_version >= (3, 7))):
gvar = self.create_getattr_var(names['__getattr__'], name, fullname)
if gvar:
sym = SymbolTableNode(GDEF, gvar)
elif self.is_missing_module(fullname):
# We use the fullname of the original definition so that we can
# detect whether two names refer to the same thing.
var_type = AnyType(TypeOfAny.from_unimported_type)
v = Var(name, type=var_type)
v._fullname = fullname
sym = SymbolTableNode(GDEF, v)
elif sym.module_hidden:
sym = None
return sym
def is_missing_module(self, module: str) -> bool:
return module in self.missing_modules
def implicit_symbol(self, sym: SymbolTableNode, name: str, parts: List[str],
source_type: AnyType) -> SymbolTableNode:
"""Create symbol for a qualified name reference through Any type."""
if sym.node is None:
basename = None
else:
basename = sym.node.fullname()
if basename is None:
fullname = name
else:
fullname = basename + '.' + '.'.join(parts)
var_type = AnyType(TypeOfAny.from_another_any, source_type)
var = Var(parts[-1], var_type)
var._fullname = fullname
return SymbolTableNode(GDEF, var)
def create_getattr_var(self, getattr_defn: SymbolTableNode,
name: str, fullname: str) -> Optional[Var]:
"""Create a dummy variable using module-level __getattr__ return type.
If not possible, return None.
Note that multiple Var nodes can be created for a single name. We
can use the from_module_getattr and the fullname attributes to
check if two dummy Var nodes refer to the same thing. Reusing Var
nodes would require non-local mutable state, which we prefer to
avoid.
"""
if isinstance(getattr_defn.node, (FuncDef, Var)):
node_type = get_proper_type(getattr_defn.node.type)
if isinstance(node_type, CallableType):
typ = node_type.ret_type
else:
typ = AnyType(TypeOfAny.from_error)
v = Var(name, type=typ)
v._fullname = fullname
v.from_module_getattr = True
return v
return None
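    # Example (illustrative sketch, not in the original source):
    # create_getattr_var() supports partial stubs such as
    #
    #     # mod.pyi
    #     from typing import Any
    #     def __getattr__(name: str) -> Any: ...
    #
    # where 'from mod import anything' binds a dummy Var typed with the
    # declared return type of __getattr__.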
def lookup_fully_qualified(self, name: str) -> SymbolTableNode:
"""Lookup a fully qualified name.
Assume that the name is defined. This happens in the global namespace --
the local module namespace is ignored.
Note that this doesn't support visibility, module-level __getattr__, or
nested classes.
"""
parts = name.split('.')
n = self.modules[parts[0]]
for i in range(1, len(parts) - 1):
next_sym = n.names[parts[i]]
assert isinstance(next_sym.node, MypyFile)
n = next_sym.node
return n.names[parts[-1]]
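    # Example (illustrative sketch, not in the original source): for
    # 'builtins.int' this resolves self.modules['builtins'].names['int'];
    # for longer names every intermediate part must be a module, so names
    # nested inside classes (e.g. 'm.C.attr') are not supported here.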
def lookup_fully_qualified_or_none(self, fullname: str) -> Optional[SymbolTableNode]:
"""Lookup a fully qualified name that refers to a module-level definition.
Don't assume that the name is defined. This happens in the global namespace --
the local module namespace is ignored. This does not dereference indirect
refs.
Note that this can't be used for names nested in class namespaces.
"""
# TODO: unify/clean-up/simplify lookup methods, see #4157.
# TODO: support nested classes (but consider performance impact,
        # we might keep the module-level-only lookup for things like 'builtins.int').
assert '.' in fullname
module, name = fullname.rsplit('.', maxsplit=1)
if module not in self.modules:
return None
filenode = self.modules[module]
result = filenode.names.get(name)
if result is None and self.is_incomplete_namespace(module):
# TODO: More explicit handling of incomplete refs?
self.record_incomplete_ref()
return result
def builtin_type(self, fully_qualified_name: str) -> Instance:
sym = self.lookup_fully_qualified(fully_qualified_name)
node = sym.node
assert isinstance(node, TypeInfo)
return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
def object_type(self) -> Instance:
return self.named_type('__builtins__.object')
def str_type(self) -> Instance:
return self.named_type('__builtins__.str')
def named_type(self, qualified_name: str, args: Optional[List[Type]] = None) -> Instance:
sym = self.lookup_qualified(qualified_name, Context())
assert sym, "Internal error: attempted to construct unknown type"
node = sym.node
assert isinstance(node, TypeInfo)
if args:
# TODO: assert len(args) == len(node.defn.type_vars)
return Instance(node, args)
return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
def named_type_or_none(self, qualified_name: str,
args: Optional[List[Type]] = None) -> Optional[Instance]:
sym = self.lookup_fully_qualified_or_none(qualified_name)
if not sym or isinstance(sym.node, PlaceholderNode):
return None
node = sym.node
if isinstance(node, TypeAlias):
assert isinstance(node.target, Instance) # type: ignore
node = node.target.type
assert isinstance(node, TypeInfo), node
if args is not None:
# TODO: assert len(args) == len(node.defn.type_vars)
return Instance(node, args)
return Instance(node, [AnyType(TypeOfAny.unannotated)] * len(node.defn.type_vars))
def lookup_current_scope(self, name: str) -> Optional[SymbolTableNode]:
if self.locals[-1] is not None:
return self.locals[-1].get(name)
elif self.type is not None:
return self.type.names.get(name)
else:
return self.globals.get(name)
#
# Adding symbols
#
def add_symbol(self,
name: str,
node: SymbolNode,
context: Context,
module_public: bool = True,
module_hidden: bool = False,
can_defer: bool = True,
escape_comprehensions: bool = False) -> bool:
"""Add symbol to the currently active symbol table.
Generally additions to symbol table should go through this method or
one of the methods below so that kinds, redefinitions, conditional
definitions, and skipped names are handled consistently.
Return True if we actually added the symbol, or False if we refused to do so
(because something is not ready).
If can_defer is True, defer current target if adding a placeholder.
"""
if self.is_func_scope():
kind = LDEF
elif self.type is not None:
kind = MDEF
else:
kind = GDEF
symbol = SymbolTableNode(kind,
node,
module_public=module_public,
module_hidden=module_hidden)
return self.add_symbol_table_node(name, symbol, context, can_defer, escape_comprehensions)
def add_symbol_skip_local(self, name: str, node: SymbolNode) -> None:
"""Same as above, but skipping the local namespace.
This doesn't check for previous definition and is only used
for serialization of method-level classes.
Classes defined within methods can be exposed through an
attribute type, but method-level symbol tables aren't serialized.
This method can be used to add such classes to an enclosing,
serialized symbol table.
"""
# TODO: currently this is only used by named tuples. Use this method
# also by typed dicts and normal classes, see issue #6422.
if self.type is not None:
names = self.type.names
kind = MDEF
else:
names = self.globals
kind = GDEF
symbol = SymbolTableNode(kind, node)
names[name] = symbol
def add_symbol_table_node(self,
name: str,
symbol: SymbolTableNode,
context: Optional[Context] = None,
can_defer: bool = True,
escape_comprehensions: bool = False) -> bool:
"""Add symbol table node to the currently active symbol table.
Return True if we actually added the symbol, or False if we refused
to do so (because something is not ready or it was a no-op).
Generate an error if there is an invalid redefinition.
If context is None, unconditionally add node, since we can't report
an error. Note that this is used by plugins to forcibly replace nodes!
TODO: Prevent plugins from replacing nodes, as it could cause problems?
Args:
name: short name of symbol
symbol: Node to add
can_defer: if True, defer current target if adding a placeholder
context: error context (see above about None value)
"""
names = self.current_symbol_table(escape_comprehensions=escape_comprehensions)
existing = names.get(name)
if isinstance(symbol.node, PlaceholderNode) and can_defer:
self.defer(context)
if (existing is not None
and context is not None
and not is_valid_replacement(existing, symbol)):
# There is an existing node, so this may be a redefinition.
# If the new node points to the same node as the old one,
# or if both old and new nodes are placeholders, we don't
# need to do anything.
old = existing.node
new = symbol.node
if isinstance(new, PlaceholderNode):
# We don't know whether this is okay. Let's wait until the next iteration.
return False
if not is_same_symbol(old, new):
if isinstance(new, (FuncDef, Decorator, OverloadedFuncDef, TypeInfo)):
self.add_redefinition(names, name, symbol)
if not (isinstance(new, (FuncDef, Decorator))
and self.set_original_def(old, new)):
self.name_already_defined(name, context, existing)
elif name not in self.missing_names and '*' not in self.missing_names:
names[name] = symbol
self.progress = True
return True
return False
def add_redefinition(self,
names: SymbolTable,
name: str,
symbol: SymbolTableNode) -> None:
"""Add a symbol table node that reflects a redefinition as a function or a class.
Redefinitions need to be added to the symbol table so that they can be found
through AST traversal, but they have dummy names of form 'name-redefinition[N]',
where N ranges over 2, 3, ... (omitted for the first redefinition).
        Note: we always store redefinitions independently of whether they are valid or not
        (so they will be semantically analyzed); the caller should give an error for
        invalid redefinitions (e.g. a variable redefined as a class).
"""
i = 1
        # Don't serialize redefined nodes. They are likely to have
        # busted internal references, which can cause problems with
        # serialization, and they can't have any external references
        # to them.
symbol.no_serialize = True
while True:
if i == 1:
new_name = '{}-redefinition'.format(name)
else:
new_name = '{}-redefinition{}'.format(name, i)
existing = names.get(new_name)
if existing is None:
names[new_name] = symbol
return
elif existing.node is symbol.node:
# Already there
return
i += 1
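    # Example (illustrative sketch, not in the original source): if 'f' is
    # redefined twice in the same scope, the extra definitions are stored
    # under the dummy keys 'f-redefinition' and 'f-redefinition2', so AST
    # traversal still finds them while 'f' keeps pointing at the active
    # definition.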
def add_module_symbol(self,
id: str,
as_id: str,
module_public: bool,
context: Context,
module_hidden: bool = False) -> None:
"""Add symbol that is a reference to a module object."""
if id in self.modules:
node = self.modules[id]
self.add_symbol(as_id, node, context,
module_public=module_public,
module_hidden=module_hidden)
else:
self.add_unknown_imported_symbol(as_id, context, target_name=id)
def add_local(self, node: Union[Var, FuncDef, OverloadedFuncDef], context: Context) -> None:
"""Add local variable or function."""
assert self.is_func_scope()
name = node.name()
node._fullname = name
self.add_symbol(name, node, context)
def add_imported_symbol(self,
name: str,
node: SymbolTableNode,
context: Context,
module_public: bool = True,
module_hidden: bool = False) -> None:
"""Add an alias to an existing symbol through import."""
symbol = SymbolTableNode(node.kind, node.node,
module_public=module_public,
module_hidden=module_hidden)
self.add_symbol_table_node(name, symbol, context)
def add_unknown_imported_symbol(self,
name: str,
context: Context,
target_name: Optional[str] = None) -> None:
"""Add symbol that we don't know what it points to because resolving an import failed.
This can happen if a module is missing, or it is present, but doesn't have
the imported attribute. The `target_name` is the name of symbol in the namespace
it is imported from. For example, for 'from mod import x as y' the target_name is
'mod.x'. This is currently used only to track logical dependencies.
"""
existing = self.current_symbol_table().get(name)
if existing and isinstance(existing.node, Var) and existing.node.is_suppressed_import:
# This missing import was already added -- nothing to do here.
return
var = Var(name)
if self.options.logical_deps and target_name is not None:
# This makes it possible to add logical fine-grained dependencies
# from a missing module. We can't use this by default, since in a
# few places we assume that the full name points to a real
# definition, but this name may point to nothing.
var._fullname = target_name
elif self.type:
var._fullname = self.type.fullname() + "." + name
var.info = self.type
else:
var._fullname = self.qualified_name(name)
var.is_ready = True
any_type = AnyType(TypeOfAny.from_unimported_type, missing_import_name=var._fullname)
var.type = any_type
var.is_suppressed_import = True
self.add_symbol(name, var, context)
#
# Other helpers
#
@contextmanager
def tvar_scope_frame(self, frame: TypeVarScope) -> Iterator[None]:
old_scope = self.tvar_scope
self.tvar_scope = frame
yield
self.tvar_scope = old_scope
def defer(self, debug_context: Optional[Context] = None) -> None:
"""Defer current analysis target to be analyzed again.
This must be called if something in the current target is
incomplete or has a placeholder node. However, this must *not*
be called during the final analysis iteration! Instead, an error
should be generated. Often 'process_placeholder' is a good
way to either defer or generate an error.
NOTE: Some methods, such as 'anal_type', 'mark_incomplete' and
        'record_incomplete_ref', call this implicitly when needed.
They are usually preferable to a direct defer() call.
"""
assert not self.final_iteration, 'Must not defer during final iteration'
self.deferred = True
# Store debug info for this deferral.
line = (debug_context.line if debug_context else
self.statement.line if self.statement else -1)
self.deferral_debug_context.append((self.cur_mod_id, line))
def track_incomplete_refs(self) -> Tag:
"""Return tag that can be used for tracking references to incomplete names."""
return self.num_incomplete_refs
def found_incomplete_ref(self, tag: Tag) -> bool:
"""Have we encountered an incomplete reference since starting tracking?"""
return self.num_incomplete_refs != tag
def record_incomplete_ref(self) -> None:
"""Record the encounter of an incomplete reference and defer current analysis target."""
self.defer()
self.num_incomplete_refs += 1
def mark_incomplete(self, name: str, node: Node,
becomes_typeinfo: bool = False) -> None:
"""Mark a definition as incomplete (and defer current analysis target).
Also potentially mark the current namespace as incomplete.
Args:
name: The name that we weren't able to define (or '*' if the name is unknown)
node: The node that refers to the name (definition or lvalue)
becomes_typeinfo: Pass this to PlaceholderNode (used by special forms like
named tuples that will create TypeInfos).
"""
self.defer(node)
if name == '*':
self.incomplete = True
elif not self.is_global_or_nonlocal(name):
fullname = self.qualified_name(name)
assert self.statement
placeholder = PlaceholderNode(fullname, node, self.statement.line,
becomes_typeinfo=becomes_typeinfo)
self.add_symbol(name, placeholder, context=dummy_context())
self.missing_names.add(name)
def is_incomplete_namespace(self, fullname: str) -> bool:
"""Is a module or class namespace potentially missing some definitions?
If a name is missing from an incomplete namespace, we'll need to defer the
current analysis target.
"""
return fullname in self.incomplete_namespaces
def process_placeholder(self, name: str, kind: str, ctx: Context) -> None:
"""Process a reference targeting placeholder node.
If this is not a final iteration, defer current node,
otherwise report an error.
        The 'kind' argument indicates whether this is a name or an attribute
        expression (used for a better error message).
"""
if self.final_iteration:
self.cannot_resolve_name(name, kind, ctx)
else:
self.defer(ctx)
def cannot_resolve_name(self, name: str, kind: str, ctx: Context) -> None:
self.fail('Cannot resolve {} "{}" (possible cyclic definition)'.format(kind, name), ctx)
def qualified_name(self, name: str) -> str:
if self.type is not None:
return self.type._fullname + '.' + name
elif self.is_func_scope():
return name
else:
return self.cur_mod_id + '.' + name
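    # Example (illustrative sketch, not in the original source): for a name
    # 'x' this returns
    #
    #     'pkg.mod.C.x'  at class scope (class C in module pkg.mod)
    #     'x'            at function scope (locals get short full names)
    #     'pkg.mod.x'    at module scope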
def enter(self, function: Union[FuncItem, GeneratorExpr, DictionaryComprehension]) -> None:
"""Enter a function, generator or comprehension scope."""
names = self.saved_locals.setdefault(function, SymbolTable())
self.locals.append(names)
is_comprehension = isinstance(function, (GeneratorExpr, DictionaryComprehension))
self.is_comprehension_stack.append(is_comprehension)
self.global_decls.append(set())
self.nonlocal_decls.append(set())
        # -1 since entering a block will increment this to 0.
self.block_depth.append(-1)
def leave(self) -> None:
self.locals.pop()
self.is_comprehension_stack.pop()
self.global_decls.pop()
self.nonlocal_decls.pop()
self.block_depth.pop()
def is_func_scope(self) -> bool:
return self.locals[-1] is not None
def is_nested_within_func_scope(self) -> bool:
"""Are we underneath a function scope, even if we are in a nested class also?"""
return any(l is not None for l in self.locals)
def is_class_scope(self) -> bool:
return self.type is not None and not self.is_func_scope()
def is_module_scope(self) -> bool:
return not (self.is_class_scope() or self.is_func_scope())
def current_symbol_kind(self) -> int:
if self.is_class_scope():
kind = MDEF
elif self.is_func_scope():
kind = LDEF
else:
kind = GDEF
return kind
def current_symbol_table(self, escape_comprehensions: bool = False) -> SymbolTable:
if self.is_func_scope():
assert self.locals[-1] is not None
if escape_comprehensions:
for i, is_comprehension in enumerate(reversed(self.is_comprehension_stack)):
if not is_comprehension:
names = self.locals[-1 - i]
break
else:
assert False, "Should have at least one non-comprehension scope"
else:
names = self.locals[-1]
assert names is not None
elif self.type is not None:
names = self.type.names
else:
names = self.globals
return names
def is_global_or_nonlocal(self, name: str) -> bool:
return (self.is_func_scope()
and (name in self.global_decls[-1]
or name in self.nonlocal_decls[-1]))
def add_exports(self, exp_or_exps: Union[Iterable[Expression], Expression]) -> None:
exps = [exp_or_exps] if isinstance(exp_or_exps, Expression) else exp_or_exps
for exp in exps:
if isinstance(exp, StrExpr):
self.all_exports.append(exp.value)
def check_no_global(self,
name: str,
ctx: Context,
is_overloaded_func: bool = False) -> None:
if name in self.globals:
prev_is_overloaded = isinstance(self.globals[name], OverloadedFuncDef)
if is_overloaded_func and prev_is_overloaded:
self.fail("Nonconsecutive overload {} found".format(name), ctx)
elif prev_is_overloaded:
self.fail("Definition of '{}' missing 'overload'".format(name), ctx)
else:
self.name_already_defined(name, ctx, self.globals[name])
def name_not_defined(self, name: str, ctx: Context, namespace: Optional[str] = None) -> None:
if self.is_incomplete_namespace(namespace or self.cur_mod_id):
# Target namespace is incomplete, so it's possible that the name will be defined
# later on. Defer current target.
self.record_incomplete_ref()
return
message = "Name '{}' is not defined".format(name)
self.fail(message, ctx, code=codes.NAME_DEFINED)
if 'builtins.{}'.format(name) in SUGGESTED_TEST_FIXTURES:
# The user probably has a missing definition in a test fixture. Let's verify.
fullname = 'builtins.{}'.format(name)
if self.lookup_fully_qualified_or_none(fullname) is None:
# Yes. Generate a helpful note.
self.add_fixture_note(fullname, ctx)
modules_with_unimported_hints = {
name.split('.', 1)[0]
for name in TYPES_FOR_UNIMPORTED_HINTS
}
lowercased = {
name.lower(): name
for name in TYPES_FOR_UNIMPORTED_HINTS
}
for module in modules_with_unimported_hints:
fullname = '{}.{}'.format(module, name).lower()
if fullname not in lowercased:
continue
# User probably forgot to import these types.
hint = (
'Did you forget to import it from "{module}"?'
' (Suggestion: "from {module} import {name}")'
).format(module=module, name=lowercased[fullname].rsplit('.', 1)[-1])
self.note(hint, ctx, code=codes.NAME_DEFINED)
def already_defined(self,
name: str,
ctx: Context,
original_ctx: Optional[Union[SymbolTableNode, SymbolNode]],
noun: str) -> None:
if isinstance(original_ctx, SymbolTableNode):
node = original_ctx.node # type: Optional[SymbolNode]
elif isinstance(original_ctx, SymbolNode):
node = original_ctx
else:
node = None
if isinstance(original_ctx, SymbolTableNode) and isinstance(original_ctx.node, MypyFile):
# Since this is an import, original_ctx.node points to the module definition.
# Therefore its line number is always 1, which is not useful for this
# error message.
extra_msg = ' (by an import)'
elif node and node.line != -1 and self.is_local_name(node.fullname()):
# TODO: Using previous symbol node may give wrong line. We should use
# the line number where the binding was established instead.
extra_msg = ' on line {}'.format(node.line)
else:
extra_msg = ' (possibly by an import)'
self.fail("{} '{}' already defined{}".format(noun, unmangle(name), extra_msg), ctx,
code=codes.NO_REDEF)
def name_already_defined(self,
name: str,
ctx: Context,
original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
) -> None:
self.already_defined(name, ctx, original_ctx, noun='Name')
def attribute_already_defined(self,
name: str,
ctx: Context,
original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
) -> None:
self.already_defined(name, ctx, original_ctx, noun='Attribute')
def is_local_name(self, name: str) -> bool:
"""Does name look like reference to a definition in the current module?"""
return self.is_defined_in_current_module(name) or '.' not in name
def fail(self,
msg: str,
ctx: Context,
serious: bool = False,
*,
code: Optional[ErrorCode] = None,
blocker: bool = False) -> None:
if (not serious and
not self.options.check_untyped_defs and
self.function_stack and
self.function_stack[-1].is_dynamic()):
return
# In case it's a bug and we don't really have context
assert ctx is not None, msg
self.errors.report(ctx.get_line(), ctx.get_column(), msg, blocker=blocker, code=code)
def fail_blocker(self, msg: str, ctx: Context) -> None:
self.fail(msg, ctx, blocker=True)
def note(self, msg: str, ctx: Context, code: Optional[ErrorCode] = None) -> None:
if (not self.options.check_untyped_defs and
self.function_stack and
self.function_stack[-1].is_dynamic()):
return
self.errors.report(ctx.get_line(), ctx.get_column(), msg, severity='note', code=code)
def accept(self, node: Node) -> None:
try:
node.accept(self)
except Exception as err:
report_internal_error(err, self.errors.file, node.line, self.errors, self.options)
def expr_to_analyzed_type(self,
expr: Expression,
report_invalid_types: bool = True,
allow_placeholder: bool = False) -> Optional[Type]:
if isinstance(expr, CallExpr):
expr.accept(self)
is_named_tuple, info = self.named_tuple_analyzer.check_namedtuple(expr, None,
self.is_func_scope())
if not is_named_tuple:
# Some form of namedtuple is the only valid type that looks like a call
# expression. This isn't a valid type.
raise TypeTranslationError()
elif not info:
self.defer(expr)
return None
assert info.tuple_type, "NamedTuple without tuple type"
fallback = Instance(info, [])
return TupleType(info.tuple_type.items, fallback=fallback)
typ = expr_to_unanalyzed_type(expr)
return self.anal_type(typ, report_invalid_types=report_invalid_types,
allow_placeholder=allow_placeholder)
def analyze_type_expr(self, expr: Expression) -> None:
        # There are certain expressions that mypy does not need to semantically analyze,
        # since they are analyzed solely as types. (For example, indexes in type alias
        # definitions and base classes in class defs.) External consumers of the mypy AST
        # may need them semantically analyzed, however, if they need to treat them as
        # expressions and not types. (Which is to say, mypyc needs to do this.) Do the analysis
# in a fresh tvar scope in order to suppress any errors about using type variables.
with self.tvar_scope_frame(TypeVarScope()):
expr.accept(self)
def type_analyzer(self, *,
tvar_scope: Optional[TypeVarScope] = None,
allow_tuple_literal: bool = False,
allow_unbound_tvars: bool = False,
allow_placeholder: bool = False,
report_invalid_types: bool = True) -> TypeAnalyser:
if tvar_scope is None:
tvar_scope = self.tvar_scope
tpan = TypeAnalyser(self,
tvar_scope,
self.plugin,
self.options,
self.is_typeshed_stub_file,
allow_unbound_tvars=allow_unbound_tvars,
allow_tuple_literal=allow_tuple_literal,
report_invalid_types=report_invalid_types,
allow_unnormalized=self.is_stub_file,
allow_placeholder=allow_placeholder)
tpan.in_dynamic_func = bool(self.function_stack and self.function_stack[-1].is_dynamic())
tpan.global_scope = not self.type and not self.function_stack
return tpan
def anal_type(self,
typ: Type, *,
tvar_scope: Optional[TypeVarScope] = None,
allow_tuple_literal: bool = False,
allow_unbound_tvars: bool = False,
allow_placeholder: bool = False,
report_invalid_types: bool = True,
third_pass: bool = False) -> Optional[Type]:
"""Semantically analyze a type.
Args:
typ: Type to analyze (if already analyzed, this is a no-op)
allow_placeholder: If True, may return PlaceholderType if
encountering an incomplete definition
third_pass: Unused; only for compatibility with old semantic
analyzer
Return None only if some part of the type couldn't be bound *and* it
referred to an incomplete namespace or definition. In this case also
defer as needed. During a final iteration this won't return None;
instead report an error if the type can't be analyzed and return
AnyType.
In case of other errors, report an error message and return AnyType.
NOTE: The caller shouldn't defer even if this returns None or a
placeholder type.
"""
a = self.type_analyzer(tvar_scope=tvar_scope,
allow_unbound_tvars=allow_unbound_tvars,
allow_tuple_literal=allow_tuple_literal,
allow_placeholder=allow_placeholder,
report_invalid_types=report_invalid_types)
tag = self.track_incomplete_refs()
typ = typ.accept(a)
if self.found_incomplete_ref(tag):
# Something could not be bound yet.
return None
self.add_type_alias_deps(a.aliases_used)
return typ
def class_type(self, self_type: Type) -> Type:
return TypeType.make_normalized(self_type)
def schedule_patch(self, priority: int, patch: Callable[[], None]) -> None:
self.patches.append((priority, patch))
def report_hang(self) -> None:
print('Deferral trace:')
for mod, line in self.deferral_debug_context:
print(' {}:{}'.format(mod, line))
self.errors.report(-1, -1,
'INTERNAL ERROR: maximum semantic analysis iteration count reached',
blocker=True)
def add_plugin_dependency(self, trigger: str, target: Optional[str] = None) -> None:
"""Add dependency from trigger to a target.
If the target is not given explicitly, use the current target.
"""
if target is None:
target = self.scope.current_target()
self.cur_mod_node.plugin_deps.setdefault(trigger, set()).add(target)
def add_type_alias_deps(self,
aliases_used: Iterable[str],
target: Optional[str] = None) -> None:
"""Add full names of type aliases on which the current node depends.
This is used by fine-grained incremental mode to re-check the corresponding nodes.
If `target` is None, then the target node used will be the current scope.
"""
if not aliases_used:
# A basic optimization to avoid adding targets with no dependencies to
# the `alias_deps` dict.
return
if target is None:
target = self.scope.current_target()
self.cur_mod_node.alias_deps[target].update(aliases_used)
def is_mangled_global(self, name: str) -> bool:
# A global is mangled if there exists at least one renamed variant.
return unmangle(name) + "'" in self.globals
def is_initial_mangled_global(self, name: str) -> bool:
# If there are renamed definitions for a global, the first one has exactly one prime.
return name == unmangle(name) + "'"
def parse_bool(self, expr: Expression) -> Optional[bool]:
if isinstance(expr, NameExpr):
if expr.fullname == 'builtins.True':
return True
if expr.fullname == 'builtins.False':
return False
return None
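    # A commented sketch of parse_bool (hedged: `fullname` is a plain attribute
    # on RefExpr in this mypy version, so it can be set directly; `analyzer` is
    # a hypothetical SemanticAnalyzer instance):
    #
    #     expr = NameExpr('True')
    #     expr.fullname = 'builtins.True'
    #     analyzer.parse_bool(expr)          # True
    #     analyzer.parse_bool(StrExpr('x'))  # None: not a NameExpr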
class HasPlaceholders(TypeQuery[bool]):
def __init__(self) -> None:
super().__init__(any)
def visit_placeholder_type(self, t: PlaceholderType) -> bool:
return True
def has_placeholder(typ: Type) -> bool:
"""Check if a type contains any placeholder types (recursively)."""
return typ.accept(HasPlaceholders())
def replace_implicit_first_type(sig: FunctionLike, new: Type) -> FunctionLike:
if isinstance(sig, CallableType):
if len(sig.arg_types) == 0:
return sig
return sig.copy_modified(arg_types=[new] + sig.arg_types[1:])
elif isinstance(sig, Overloaded):
return Overloaded([cast(CallableType, replace_implicit_first_type(i, new))
for i in sig.items()])
else:
assert False
def refers_to_fullname(node: Expression, fullname: str) -> bool:
"""Is node a name or member expression with the given full name?"""
if not isinstance(node, RefExpr):
return False
if node.fullname == fullname:
return True
if isinstance(node.node, TypeAlias):
target = get_proper_type(node.node.target)
if isinstance(target, Instance) and target.type.fullname() == fullname:
return True
return False
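# Sketch of refers_to_fullname: it matches both direct references and type
# aliases whose target instance has the requested name (`expr` below is
# illustrative, with `fullname` assigned by hand as semantic analysis would):
#
#     expr = NameExpr('overload')
#     expr.fullname = 'typing.overload'
#     refers_to_fullname(expr, 'typing.overload')         # True
#     refers_to_fullname(StrExpr('x'), 'typing.overload')  # False: not a RefExpr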
def refers_to_class_or_function(node: Expression) -> bool:
"""Does semantically analyzed node refer to a class?"""
return (isinstance(node, RefExpr) and
isinstance(node.node, (TypeInfo, FuncDef, OverloadedFuncDef)))
def find_duplicate(list: List[T]) -> Optional[T]:
"""If the list has duplicates, return one of the duplicates.
Otherwise, return None.
"""
for i in range(1, len(list)):
if list[i] in list[:i]:
return list[i]
return None
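# find_duplicate is a quadratic scan, which is acceptable for the short lists
# (e.g. direct base classes) it is applied to:
#
#     find_duplicate([1, 2, 3, 2, 1])  # returns 2, the first repeated element
#     find_duplicate(['a', 'b'])       # returns None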
def remove_imported_names_from_symtable(names: SymbolTable,
module: str) -> None:
"""Remove all imported names from the symbol table of a module."""
removed = [] # type: List[str]
for name, node in names.items():
if node.node is None:
continue
fullname = node.node.fullname()
prefix = fullname[:fullname.rfind('.')]
if prefix != module:
removed.append(name)
for name in removed:
del names[name]
def make_any_non_explicit(t: Type) -> Type:
"""Replace all Any types within in with Any that has attribute 'explicit' set to False"""
return t.accept(MakeAnyNonExplicit())
class MakeAnyNonExplicit(TypeTranslator):
def visit_any(self, t: AnyType) -> Type:
if t.type_of_any == TypeOfAny.explicit:
return t.copy_modified(TypeOfAny.special_form)
return t
def apply_semantic_analyzer_patches(patches: List[Tuple[int, Callable[[], None]]]) -> None:
"""Call patch callbacks in the right order.
This should happen after semantic analyzer pass 3.
"""
patches_by_priority = sorted(patches, key=lambda x: x[0])
for priority, patch_func in patches_by_priority:
patch_func()
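# Patches run in ascending priority order; Python's sort is stable, so patches
# with equal priority keep their registration order. A minimal runnable sketch:
#
#     order = []  # type: List[str]
#     apply_semantic_analyzer_patches([
#         (2, lambda: order.append('late')),
#         (1, lambda: order.append('early')),
#     ])
#     # order == ['early', 'late']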
def names_modified_by_assignment(s: AssignmentStmt) -> List[NameExpr]:
"""Return all unqualified (short) names assigned to in an assignment statement."""
result = [] # type: List[NameExpr]
for lvalue in s.lvalues:
result += names_modified_in_lvalue(lvalue)
return result
def names_modified_in_lvalue(lvalue: Lvalue) -> List[NameExpr]:
"""Return all NameExpr assignment targets in an Lvalue."""
if isinstance(lvalue, NameExpr):
return [lvalue]
elif isinstance(lvalue, StarExpr):
return names_modified_in_lvalue(lvalue.expr)
elif isinstance(lvalue, (ListExpr, TupleExpr)):
result = [] # type: List[NameExpr]
for item in lvalue.items:
result += names_modified_in_lvalue(item)
return result
return []
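# Nested and starred targets are flattened recursively; for a statement like
# `a, (b, *c) = ...` the collected names come back in source order:
#
#     lv = TupleExpr([NameExpr('a'),
#                     TupleExpr([NameExpr('b'), StarExpr(NameExpr('c'))])])
#     [e.name for e in names_modified_in_lvalue(lv)]  # ['a', 'b', 'c']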
def is_same_var_from_getattr(n1: Optional[SymbolNode], n2: Optional[SymbolNode]) -> bool:
"""Do n1 and n2 refer to the same Var derived from module-level __getattr__?"""
return (isinstance(n1, Var)
and n1.from_module_getattr
and isinstance(n2, Var)
and n2.from_module_getattr
and n1.fullname() == n2.fullname())
def dummy_context() -> Context:
return TempNode(AnyType(TypeOfAny.special_form))
def is_valid_replacement(old: SymbolTableNode, new: SymbolTableNode) -> bool:
"""Can symbol table node replace an existing one?
These are the only valid cases:
1. Placeholder gets replaced with a non-placeholder
2. Placeholder that isn't known to become type replaced with a
placeholder that can become a type
"""
if isinstance(old.node, PlaceholderNode):
if isinstance(new.node, PlaceholderNode):
return not old.node.becomes_typeinfo and new.node.becomes_typeinfo
else:
return True
return False
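# The rules above as a small truth table (old node -> new node):
#
#     PlaceholderNode                     -> real SymbolNode            : valid
#     PlaceholderNode (not typeinfo-ish)  -> PlaceholderNode (is)       : valid
#     PlaceholderNode (typeinfo-ish)      -> PlaceholderNode (any kind) : invalid
#     real SymbolNode                     -> anything                   : invalid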
def is_same_symbol(a: Optional[SymbolNode], b: Optional[SymbolNode]) -> bool:
return (a == b
or (isinstance(a, PlaceholderNode)
and isinstance(b, PlaceholderNode))
or is_same_var_from_getattr(a, b))
from contextlib import contextmanager
from typing import (
List, Dict, Set, Tuple, cast, TypeVar, Union, Optional, Callable, Iterator, Iterable,
)
from typing_extensions import Final
from mypy.nodes import (
MypyFile, TypeInfo, Node, AssignmentStmt, FuncDef, OverloadedFuncDef,
ClassDef, Var, GDEF, FuncItem, Import, Expression, Lvalue,
ImportFrom, ImportAll, Block, LDEF, NameExpr, MemberExpr,
IndexExpr, TupleExpr, ListExpr, ExpressionStmt, ReturnStmt,
RaiseStmt, AssertStmt, OperatorAssignmentStmt, WhileStmt,
ForStmt, BreakStmt, ContinueStmt, IfStmt, TryStmt, WithStmt, DelStmt,
GlobalDecl, SuperExpr, DictExpr, CallExpr, RefExpr, OpExpr, UnaryExpr,
SliceExpr, CastExpr, RevealExpr, TypeApplication, Context, SymbolTable,
SymbolTableNode, ListComprehension, GeneratorExpr,
LambdaExpr, MDEF, Decorator, SetExpr, TypeVarExpr,
StrExpr, BytesExpr, PrintStmt, ConditionalExpr, PromoteExpr,
ComparisonExpr, StarExpr, ARG_POS, ARG_NAMED, type_aliases,
YieldFromExpr, NamedTupleExpr, NonlocalDecl, SymbolNode,
SetComprehension, DictionaryComprehension, TypeAlias, TypeAliasExpr,
YieldExpr, ExecStmt, BackquoteExpr, ImportBase, AwaitExpr,
IntExpr, FloatExpr, UnicodeExpr, TempNode, OverloadPart,
PlaceholderNode, COVARIANT, CONTRAVARIANT, INVARIANT,
nongen_builtins, get_member_expr_fullname, REVEAL_TYPE,
REVEAL_LOCALS, is_final_node, TypedDictExpr, type_aliases_target_versions,
EnumCallExpr, RUNTIME_PROTOCOL_DECOS, FakeExpression, Statement, AssignmentExpr,
)
from mypy.tvar_scope import TypeVarScope
from mypy.typevars import fill_typevars
from mypy.visitor import NodeVisitor
from mypy.errors import Errors, report_internal_error
from mypy.messages import best_matches, MessageBuilder, pretty_or
from mypy.errorcodes import ErrorCode
from mypy import message_registry, errorcodes as codes
from mypy.types import (
FunctionLike, UnboundType, TypeVarDef, TupleType, UnionType, StarType,
CallableType, Overloaded, Instance, Type, AnyType, LiteralType, LiteralValue,
TypeTranslator, TypeOfAny, TypeType, NoneType, PlaceholderType, TPDICT_NAMES, ProperType,
get_proper_type, get_proper_types
)
from mypy.typeops import function_type
from mypy.type_visitor import TypeQuery
from mypy.nodes import implicit_module_attrs
from mypy.typeanal import (
TypeAnalyser, analyze_type_alias, no_subscript_builtin_alias,
TypeVariableQuery, TypeVarList, remove_dups, has_any_from_unimported_type,
check_for_explicit_any, type_constructors, fix_instance_types
)
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.options import Options
from mypy.plugin import (
Plugin, ClassDefContext, SemanticAnalyzerPluginInterface,
DynamicClassDefContext
)
from mypy.util import correct_relative_import, unmangle, module_prefix
from mypy.scope import Scope
from mypy.semanal_shared import (
SemanticAnalyzerInterface, set_callable_name, calculate_tuple_fallback, PRIORITY_FALLBACKS
)
from mypy.semanal_namedtuple import NamedTupleAnalyzer
from mypy.semanal_typeddict import TypedDictAnalyzer
from mypy.semanal_enum import EnumCallAnalyzer
from mypy.semanal_newtype import NewTypeAnalyzer
from mypy.reachability import (
infer_reachability_of_if_statement, infer_condition_value, ALWAYS_FALSE, ALWAYS_TRUE,
MYPY_TRUE, MYPY_FALSE
)
from mypy.mro import calculate_mro, MroError
T = TypeVar('T')
SUGGESTED_TEST_FIXTURES = {
'builtins.list': 'list.pyi',
'builtins.dict': 'dict.pyi',
'builtins.set': 'set.pyi',
'builtins.bool': 'bool.pyi',
'builtins.Exception': 'exception.pyi',
'builtins.BaseException': 'exception.pyi',
'builtins.isinstance': 'isinstancelist.pyi',
'builtins.property': 'property.pyi',
'builtins.classmethod': 'classmethod.pyi',
}
TYPES_FOR_UNIMPORTED_HINTS = {
'typing.Any',
'typing.Callable',
'typing.Dict',
'typing.Iterable',
'typing.Iterator',
'typing.List',
'typing.Optional',
'typing.Set',
'typing.Tuple',
'typing.TypeVar',
'typing.Union',
'typing.cast',
}
CORE_BUILTIN_CLASSES = ['object', 'bool', 'function']
Tag = int
class SemanticAnalyzer(NodeVisitor[None],
SemanticAnalyzerInterface,
SemanticAnalyzerPluginInterface):
modules = None
globals = None
global_decls = None
nonlocal_decls = None
locals = None
is_comprehension_stack = None
block_depth = None
type = None
type_stack = None
tvar_scope = None
options = None
function_stack = None
progress = False
deferred = False
incomplete = False
_final_iteration = False
# Note that missing names are per module, _not_ per namespace. This means that e.g.
# a missing name at global scope will block adding same name at a class scope.
# This should not affect correctness and is purely a performance issue,
# since it can cause unnecessary deferrals. These are represented as
# PlaceholderNodes in the symbol table. We use this to ensure that the first
# definition takes precedence even if it's incomplete.
missing_names = None
patches = None
loop_depth = 0
cur_mod_id = ''
is_stub_file = False
_is_typeshed_stub_file = False
    imports = None
    def __init__(self,
                 modules: Dict[str, MypyFile],
                 missing_modules: Set[str],
incomplete_namespaces: Set[str],
errors: Errors,
plugin: Plugin) -> None:
self.locals = [None]
self.is_comprehension_stack = [False]
        self.saved_locals = {}
self.imports = set()
self.type = None
self.type_stack = []
self.tvar_scope = TypeVarScope()
self.function_stack = []
self.block_depth = [0]
self.loop_depth = 0
self.errors = errors
self.modules = modules
self.msg = MessageBuilder(errors, modules)
self.missing_modules = missing_modules
self.incomplete_namespaces = incomplete_namespaces
self.all_exports = [] # type: List[str]
# Map from module id to list of explicitly exported names (i.e. names in __all__).
self.export_map = {} # type: Dict[str, List[str]]
self.plugin = plugin
        # If True, process function definitions. If False, don't. This is used
        # for processing module top levels in fine-grained incremental mode.
self.recurse_into_functions = True
self.scope = Scope()
self.deferral_debug_context = []
    # mypyc doesn't properly handle implementing an abstract attribute
    # with a regular attribute so we make them properties
@property
def is_typeshed_stub_file(self) -> bool:
return self._is_typeshed_stub_file
@property
def final_iteration(self) -> bool:
return self._final_iteration
#
# Preparing module (performed before semantic analysis)
#
def prepare_file(self, file_node: MypyFile) -> None:
if 'builtins' in self.modules:
file_node.names['__builtins__'] = SymbolTableNode(GDEF,
self.modules['builtins'])
if file_node.fullname() == 'builtins':
self.prepare_builtins_namespace(file_node)
if file_node.fullname() == 'typing':
self.prepare_typing_namespace(file_node)
def prepare_typing_namespace(self, file_node: MypyFile) -> None:
for stmt in file_node.defs.copy():
if (isinstance(stmt, AssignmentStmt) and len(stmt.lvalues) == 1 and
isinstance(stmt.lvalues[0], NameExpr)):
# Assignment to a simple name, remove it if it is a dummy alias.
if 'typing.' + stmt.lvalues[0].name in type_aliases:
file_node.defs.remove(stmt)
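    # For instance, a stub line like `List = _Alias()` (illustrative) would
    # otherwise shadow the special-cased 'typing.List' alias, so any simple
    # `NAME = ...` where 'typing.NAME' is a known alias is dropped from the
    # parse tree here before analysis begins.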
def prepare_builtins_namespace(self, file_node: MypyFile) -> None:
names = file_node.names
# Add empty definition for core built-in classes, since they are required for basic
# operation. These will be completed later on.
for name in CORE_BUILTIN_CLASSES:
cdef = ClassDef(name, Block([])) # Dummy ClassDef, will be replaced later
info = TypeInfo(SymbolTable(), cdef, 'builtins')
info._fullname = 'builtins.%s' % name
names[name] = SymbolTableNode(GDEF, info)
bool_info = names['bool'].node
assert isinstance(bool_info, TypeInfo)
bool_type = Instance(bool_info, [])
special_var_types = [
('None', NoneType()),
# reveal_type is a mypy-only function that gives an error with
# the type of its arg.
('reveal_type', AnyType(TypeOfAny.special_form)),
# reveal_locals is a mypy-only function that gives an error with the types of
# locals
('reveal_locals', AnyType(TypeOfAny.special_form)),
('True', bool_type),
('False', bool_type),
('__debug__', bool_type),
] # type: List[Tuple[str, Type]]
for name, typ in special_var_types:
v = Var(name, typ)
v._fullname = 'builtins.%s' % name
file_node.names[name] = SymbolTableNode(GDEF, v)
#
# Analyzing a target
#
def refresh_partial(self,
node: Union[MypyFile, FuncDef, OverloadedFuncDef],
patches: List[Tuple[int, Callable[[], None]]],
final_iteration: bool,
file_node: MypyFile,
options: Options,
active_type: Optional[TypeInfo] = None) -> None:
self.patches = patches
self.deferred = False
self.incomplete = False
self._final_iteration = final_iteration
self.missing_names = set()
with self.file_context(file_node, options, active_type):
if isinstance(node, MypyFile):
self.refresh_top_level(node)
else:
self.recurse_into_functions = True
self.accept(node)
del self.patches
def refresh_top_level(self, file_node: MypyFile) -> None:
self.recurse_into_functions = False
self.add_implicit_module_attrs(file_node)
for d in file_node.defs:
self.accept(d)
if file_node.fullname() == 'typing':
self.add_builtin_aliases(file_node)
self.adjust_public_exports()
self.export_map[self.cur_mod_id] = self.all_exports
self.all_exports = []
def add_implicit_module_attrs(self, file_node: MypyFile) -> None:
for name, t in implicit_module_attrs.items():
# unicode docstrings should be accepted in Python 2
if name == '__doc__':
if self.options.python_version >= (3, 0):
typ = UnboundType('__builtins__.str') # type: Type
else:
typ = UnionType([UnboundType('__builtins__.str'),
UnboundType('__builtins__.unicode')])
else:
assert t is not None, 'type should be specified for {}'.format(name)
typ = UnboundType(t)
existing = file_node.names.get(name)
if existing is not None and not isinstance(existing.node, PlaceholderNode):
# Already exists.
continue
an_type = self.anal_type(typ)
if an_type:
var = Var(name, an_type)
var._fullname = self.qualified_name(name)
var.is_ready = True
self.add_symbol(name, var, dummy_context())
else:
self.add_symbol(name,
PlaceholderNode(self.qualified_name(name), file_node, -1),
dummy_context())
def add_builtin_aliases(self, tree: MypyFile) -> None:
assert tree.fullname() == 'typing'
for alias, target_name in type_aliases.items():
if type_aliases_target_versions[alias] > self.options.python_version:
# This alias is not available on this Python version.
continue
name = alias.split('.')[-1]
if name in tree.names and not isinstance(tree.names[name].node, PlaceholderNode):
continue
tag = self.track_incomplete_refs()
n = self.lookup_fully_qualified_or_none(target_name)
if n:
if isinstance(n.node, PlaceholderNode):
self.mark_incomplete(name, tree)
else:
# Found built-in class target. Create alias.
target = self.named_type_or_none(target_name, [])
assert target is not None
# Transform List to List[Any], etc.
fix_instance_types(target, self.fail)
alias_node = TypeAlias(target, alias,
line=-1, column=-1, # there is no context
no_args=True, normalized=True)
self.add_symbol(name, alias_node, tree)
elif self.found_incomplete_ref(tag):
# Built-in class target may not ready yet -- defer.
self.mark_incomplete(name, tree)
else:
# Test fixtures may be missing some builtin classes, which is okay.
# Kill the placeholder if there is one.
if name in tree.names:
assert isinstance(tree.names[name].node, PlaceholderNode)
del tree.names[name]
def adjust_public_exports(self) -> None:
if '__all__' in self.globals:
for name, g in self.globals.items():
# Being included in __all__ explicitly exports and makes public.
if name in self.all_exports:
g.module_public = True
g.module_hidden = False
# But when __all__ is defined, and a symbol is not included in it,
# it cannot be public.
else:
g.module_public = False
@contextmanager
def file_context(self,
file_node: MypyFile,
options: Options,
active_type: Optional[TypeInfo] = None) -> Iterator[None]:
scope = self.scope
self.options = options
self.errors.set_file(file_node.path, file_node.fullname(), scope=scope)
self.cur_mod_node = file_node
self.cur_mod_id = file_node.fullname()
scope.enter_file(self.cur_mod_id)
self.is_stub_file = file_node.path.lower().endswith('.pyi')
self._is_typeshed_stub_file = self.errors.is_typeshed_file(file_node.path)
self.globals = file_node.names
self.tvar_scope = TypeVarScope()
self.named_tuple_analyzer = NamedTupleAnalyzer(options, self)
self.typed_dict_analyzer = TypedDictAnalyzer(options, self, self.msg)
self.enum_call_analyzer = EnumCallAnalyzer(options, self)
self.newtype_analyzer = NewTypeAnalyzer(options, self, self.msg)
# Counter that keeps track of references to undefined things potentially caused by
# incomplete namespaces.
self.num_incomplete_refs = 0
if active_type:
scope.enter_class(active_type)
self.enter_class(active_type.defn.info)
for tvar in active_type.defn.type_vars:
self.tvar_scope.bind_existing(tvar)
yield
if active_type:
scope.leave()
self.leave_class()
self.type = None
scope.leave()
del self.options
#
# Functions
#
def visit_func_def(self, defn: FuncDef) -> None:
self.statement = defn
# Visit default values because they may contain assignment expressions.
for arg in defn.arguments:
if arg.initializer:
arg.initializer.accept(self)
defn.is_conditional = self.block_depth[-1] > 0
        # Set full names even for those definitions that aren't added
        # to a symbol table. For example, for overload items.
        defn._fullname = self.qualified_name(defn.name())
        # We don't add module top-level functions to symbol tables
        # when we analyze their bodies in the second phase of analysis,
        # since they were added in the first phase. Nested functions
        # get always added, since they aren't separate targets.
if not self.recurse_into_functions or len(self.function_stack) > 0:
if not defn.is_decorated and not defn.is_overload:
self.add_function_to_symbol_table(defn)
if not self.recurse_into_functions:
return
with self.scope.function_scope(defn):
self.analyze_func_def(defn)
def analyze_func_def(self, defn: FuncDef) -> None:
self.function_stack.append(defn)
if defn.type:
assert isinstance(defn.type, CallableType)
self.update_function_type_variables(defn.type, defn)
self.function_stack.pop()
if self.is_class_scope():
assert self.type is not None
defn.info = self.type
if defn.type is not None and defn.name() in ('__init__', '__init_subclass__'):
assert isinstance(defn.type, CallableType)
if isinstance(get_proper_type(defn.type.ret_type), AnyType):
defn.type = defn.type.copy_modified(ret_type=NoneType())
self.prepare_method_signature(defn, self.type)
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
if defn.type:
self.check_classvar_in_signature(defn.type)
assert isinstance(defn.type, CallableType)
analyzer = self.type_analyzer()
tag = self.track_incomplete_refs()
result = analyzer.visit_callable_type(defn.type, nested=False)
if self.found_incomplete_ref(tag) or has_placeholder(result):
self.defer(defn)
return
assert isinstance(result, ProperType)
defn.type = result
self.add_type_alias_deps(analyzer.aliases_used)
self.check_function_signature(defn)
if isinstance(defn, FuncDef):
assert isinstance(defn.type, CallableType)
defn.type = set_callable_name(defn.type, defn)
self.analyze_arg_initializers(defn)
self.analyze_function_body(defn)
if defn.is_coroutine and isinstance(defn.type, CallableType) and not self.deferred:
if defn.is_async_generator:
# Async generator types are handled elsewhere
pass
else:
# A coroutine defined as `async def foo(...) -> T: ...`
# has external return type `Coroutine[Any, Any, T]`.
any_type = AnyType(TypeOfAny.special_form)
ret_type = self.named_type_or_none('typing.Coroutine',
[any_type, any_type, defn.type.ret_type])
assert ret_type is not None, "Internal error: typing.Coroutine not found"
defn.type = defn.type.copy_modified(ret_type=ret_type)
def prepare_method_signature(self, func: FuncDef, info: TypeInfo) -> None:
# Only non-static methods are special.
functype = func.type
if not func.is_static:
if func.name() == '__init_subclass__':
func.is_class = True
if not func.arguments:
self.fail('Method must have at least one argument', func)
elif isinstance(functype, CallableType):
self_type = get_proper_type(functype.arg_types[0])
if isinstance(self_type, AnyType):
leading_type = fill_typevars(info) # type: Type
if func.is_class or func.name() == '__new__':
leading_type = self.class_type(leading_type)
func.type = replace_implicit_first_type(functype, leading_type)
def set_original_def(self, previous: Optional[Node], new: Union[FuncDef, Decorator]) -> bool:
if isinstance(new, Decorator):
new = new.func
if isinstance(previous, (FuncDef, Var, Decorator)) and new.is_conditional:
new.original_def = previous
return True
else:
return False
def update_function_type_variables(self, fun_type: CallableType, defn: FuncItem) -> None:
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
a = self.type_analyzer()
fun_type.variables = a.bind_function_type_variables(fun_type, defn)
def visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
self.statement = defn
self.add_function_to_symbol_table(defn)
if not self.recurse_into_functions:
return
# NB: Since _visit_overloaded_func_def will call accept on the
# underlying FuncDefs, the function might get entered twice.
# This is fine, though, because only the outermost function is
# used to compute targets.
with self.scope.function_scope(defn):
self.analyze_overloaded_func_def(defn)
def analyze_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
# OverloadedFuncDef refers to any legitimate situation where you have
# more than one declaration for the same function in a row. This occurs
# with a @property with a setter or a deleter, and for a classic
# @overload.
defn._fullname = self.qualified_name(defn.name())
# TODO: avoid modifying items.
defn.items = defn.unanalyzed_items.copy()
first_item = defn.items[0]
first_item.is_overload = True
first_item.accept(self)
if isinstance(first_item, Decorator) and first_item.func.is_property:
# This is a property.
first_item.func.is_overload = True
self.analyze_property_with_multi_part_definition(defn)
typ = function_type(first_item.func, self.builtin_type('builtins.function'))
assert isinstance(typ, CallableType)
types = [typ]
else:
            # This is a normal overload. Find the item signatures, the
# implementation (if outside a stub), and any missing @overload
# decorators.
types, impl, non_overload_indexes = self.analyze_overload_sigs_and_impl(defn)
defn.impl = impl
if non_overload_indexes:
self.handle_missing_overload_decorators(defn, non_overload_indexes,
some_overload_decorators=len(types) > 0)
# If we found an implementation, remove it from the overload item list,
# as it's special.
if impl is not None:
assert impl is defn.items[-1]
defn.items = defn.items[:-1]
elif not non_overload_indexes:
self.handle_missing_overload_implementation(defn)
if types:
defn.type = Overloaded(types)
defn.type.line = defn.line
        if not defn.items:
            # It was not any kind of overload def after all. We've
            # visited the redefinition(s) already.
if not defn.impl:
# For really broken overloads with no items and no implementation we need to keep
# at least one item to hold basic information like function name.
defn.impl = defn.unanalyzed_items[-1]
return
# We know this is an overload def. Infer properties and perform some checks.
self.process_final_in_overload(defn)
self.process_static_or_class_method_in_overload(defn)
def analyze_overload_sigs_and_impl(
self,
defn: OverloadedFuncDef) -> Tuple[List[CallableType],
Optional[OverloadPart],
List[int]]:
types = []
non_overload_indexes = []
impl = None # type: Optional[OverloadPart]
for i, item in enumerate(defn.items):
if i != 0:
# Assume that the first item was already visited
item.is_overload = True
item.accept(self)
# TODO: support decorated overloaded functions properly
if isinstance(item, Decorator):
callable = function_type(item.func, self.builtin_type('builtins.function'))
assert isinstance(callable, CallableType)
if not any(refers_to_fullname(dec, 'typing.overload')
for dec in item.decorators):
if i == len(defn.items) - 1 and not self.is_stub_file:
# Last item outside a stub is impl
impl = item
else:
                    # Oops it wasn't an overload after all. A clear error is in order.
non_overload_indexes.append(i)
else:
item.func.is_overload = True
types.append(callable)
elif isinstance(item, FuncDef):
if i == len(defn.items) - 1 and not self.is_stub_file:
impl = item
else:
non_overload_indexes.append(i)
return types, impl, non_overload_indexes
def handle_missing_overload_decorators(self,
defn: OverloadedFuncDef,
non_overload_indexes: List[int],
some_overload_decorators: bool) -> None:
if some_overload_decorators:
for idx in non_overload_indexes:
if self.is_stub_file:
self.fail("An implementation for an overloaded function "
"is not allowed in a stub file", defn.items[idx])
else:
self.fail("The implementation for an overloaded function "
"must come last", defn.items[idx])
else:
for idx in non_overload_indexes[1:]:
self.name_already_defined(defn.name(), defn.items[idx], defn.items[0])
if defn.impl:
self.name_already_defined(defn.name(), defn.impl, defn.items[0])
for idx in reversed(non_overload_indexes):
del defn.items[idx]
def handle_missing_overload_implementation(self, defn: OverloadedFuncDef) -> None:
if not self.is_stub_file:
if self.type and self.type.is_protocol and not self.is_func_scope():
for item in defn.items:
if isinstance(item, Decorator):
item.func.is_abstract = True
else:
item.is_abstract = True
else:
self.fail(
"An overloaded function outside a stub file must have an implementation",
defn)
def process_final_in_overload(self, defn: OverloadedFuncDef) -> None:
        # If the implementation is marked as @final (or the first overload in
        # stubs), then the whole overloaded definition is @final.
if any(item.is_final for item in defn.items):
# We anyway mark it as final because it was probably the intention.
defn.is_final = True
# Only show the error once per overload
bad_final = next(ov for ov in defn.items if ov.is_final)
if not self.is_stub_file:
self.fail("@final should be applied only to overload implementation",
bad_final)
elif any(item.is_final for item in defn.items[1:]):
bad_final = next(ov for ov in defn.items[1:] if ov.is_final)
self.fail("In a stub file @final must be applied only to the first overload",
bad_final)
if defn.impl is not None and defn.impl.is_final:
defn.is_final = True
def process_static_or_class_method_in_overload(self, defn: OverloadedFuncDef) -> None:
class_status = []
static_status = []
for item in defn.items:
if isinstance(item, Decorator):
inner = item.func
elif isinstance(item, FuncDef):
inner = item
else:
assert False, "The 'item' variable is an unexpected type: {}".format(type(item))
class_status.append(inner.is_class)
static_status.append(inner.is_static)
if defn.impl is not None:
if isinstance(defn.impl, Decorator):
inner = defn.impl.func
elif isinstance(defn.impl, FuncDef):
inner = defn.impl
else:
assert False, "Unexpected impl type: {}".format(type(defn.impl))
class_status.append(inner.is_class)
static_status.append(inner.is_static)
if len(set(class_status)) != 1:
self.msg.overload_inconsistently_applies_decorator('classmethod', defn)
elif len(set(static_status)) != 1:
self.msg.overload_inconsistently_applies_decorator('staticmethod', defn)
else:
defn.is_class = class_status[0]
defn.is_static = static_status[0]
def analyze_property_with_multi_part_definition(self, defn: OverloadedFuncDef) -> None:
defn.is_property = True
items = defn.items
first_item = cast(Decorator, defn.items[0])
for item in items[1:]:
if isinstance(item, Decorator) and len(item.decorators) == 1:
node = item.decorators[0]
if isinstance(node, MemberExpr):
if node.name == 'setter':
# The first item represents the entire property.
first_item.var.is_settable_property = True
# Get abstractness from the original definition.
item.func.is_abstract = first_item.func.is_abstract
else:
self.fail("Decorated property not supported", item)
if isinstance(item, Decorator):
item.func.accept(self)
def add_function_to_symbol_table(self, func: Union[FuncDef, OverloadedFuncDef]) -> None:
if self.is_class_scope():
assert self.type is not None
func.info = self.type
func._fullname = self.qualified_name(func.name())
self.add_symbol(func.name(), func, func)
def analyze_arg_initializers(self, defn: FuncItem) -> None:
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
# Analyze default arguments
for arg in defn.arguments:
if arg.initializer:
arg.initializer.accept(self)
def analyze_function_body(self, defn: FuncItem) -> None:
is_method = self.is_class_scope()
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
# Bind the type variables again to visit the body.
if defn.type:
a = self.type_analyzer()
a.bind_function_type_variables(cast(CallableType, defn.type), defn)
self.function_stack.append(defn)
self.enter(defn)
for arg in defn.arguments:
self.add_local(arg.variable, defn)
            # The first argument of a non-static, non-class method is like 'self'
            # (though the name could be different), having the enclosing class's
            # instance type.
if is_method and not defn.is_static and not defn.is_class and defn.arguments:
defn.arguments[0].variable.is_self = True
defn.body.accept(self)
self.leave()
self.function_stack.pop()
def check_classvar_in_signature(self, typ: ProperType) -> None:
if isinstance(typ, Overloaded):
for t in typ.items():
self.check_classvar_in_signature(t)
return
if not isinstance(typ, CallableType):
return
for t in get_proper_types(typ.arg_types) + [get_proper_type(typ.ret_type)]:
if self.is_classvar(t):
self.fail_invalid_classvar(t)
break
def check_function_signature(self, fdef: FuncItem) -> None:
sig = fdef.type
assert isinstance(sig, CallableType)
if len(sig.arg_types) < len(fdef.arguments):
self.fail('Type signature has too few arguments', fdef)
num_extra_anys = len(fdef.arguments) - len(sig.arg_types)
extra_anys = [AnyType(TypeOfAny.from_error)] * num_extra_anys
sig.arg_types.extend(extra_anys)
elif len(sig.arg_types) > len(fdef.arguments):
self.fail('Type signature has too many arguments', fdef, blocker=True)
def visit_decorator(self, dec: Decorator) -> None:
self.statement = dec
dec.decorators = dec.original_decorators.copy()
dec.func.is_conditional = self.block_depth[-1] > 0
if not dec.is_overload:
self.add_symbol(dec.name(), dec, dec)
dec.func._fullname = self.qualified_name(dec.name())
for d in dec.decorators:
d.accept(self)
removed = [] # type: List[int]
no_type_check = False
for i, d in enumerate(dec.decorators):
# A bunch of decorators are special cased here.
if refers_to_fullname(d, 'abc.abstractmethod'):
removed.append(i)
dec.func.is_abstract = True
self.check_decorated_function_is_method('abstractmethod', dec)
elif (refers_to_fullname(d, 'asyncio.coroutines.coroutine') or
refers_to_fullname(d, 'types.coroutine')):
removed.append(i)
dec.func.is_awaitable_coroutine = True
elif refers_to_fullname(d, 'builtins.staticmethod'):
removed.append(i)
dec.func.is_static = True
dec.var.is_staticmethod = True
self.check_decorated_function_is_method('staticmethod', dec)
elif refers_to_fullname(d, 'builtins.classmethod'):
removed.append(i)
dec.func.is_class = True
dec.var.is_classmethod = True
self.check_decorated_function_is_method('classmethod', dec)
elif (refers_to_fullname(d, 'builtins.property') or
refers_to_fullname(d, 'abc.abstractproperty')):
removed.append(i)
dec.func.is_property = True
dec.var.is_property = True
if refers_to_fullname(d, 'abc.abstractproperty'):
dec.func.is_abstract = True
self.check_decorated_function_is_method('property', dec)
if len(dec.func.arguments) > 1:
self.fail('Too many arguments', dec.func)
elif refers_to_fullname(d, 'typing.no_type_check'):
dec.var.type = AnyType(TypeOfAny.special_form)
no_type_check = True
elif (refers_to_fullname(d, 'typing.final') or
refers_to_fullname(d, 'typing_extensions.final')):
if self.is_class_scope():
assert self.type is not None, "No type set at class scope"
if self.type.is_protocol:
self.msg.protocol_members_cant_be_final(d)
else:
dec.func.is_final = True
dec.var.is_final = True
removed.append(i)
else:
self.fail("@final cannot be used with non-method functions", d)
for i in reversed(removed):
del dec.decorators[i]
if (not dec.is_overload or dec.var.is_property) and self.type:
dec.var.info = self.type
dec.var.is_initialized_in_class = True
if not no_type_check and self.recurse_into_functions:
dec.func.accept(self)
if dec.decorators and dec.var.is_property:
self.fail('Decorated property not supported', dec)
def check_decorated_function_is_method(self, decorator: str,
context: Context) -> None:
if not self.type or self.is_func_scope():
self.fail("'%s' used with a non-method" % decorator, context)
#
# Classes
#
def visit_class_def(self, defn: ClassDef) -> None:
self.statement = defn
with self.tvar_scope_frame(self.tvar_scope.class_frame()):
self.analyze_class(defn)
def analyze_class(self, defn: ClassDef) -> None:
fullname = self.qualified_name(defn.name)
if not defn.info and not self.is_core_builtin_class(defn):
            # Add placeholder so that self-references in base classes can be
            # resolved. We don't want this to cause a deferral, since if there
            # are no incomplete references, we'll replace this with a TypeInfo
            # before returning.
placeholder = PlaceholderNode(fullname, defn, defn.line, becomes_typeinfo=True)
self.add_symbol(defn.name, placeholder, defn, can_defer=False)
tag = self.track_incomplete_refs()
# Restore base classes after previous iteration (things like Generic[T] might be removed).
defn.base_type_exprs.extend(defn.removed_base_type_exprs)
defn.removed_base_type_exprs.clear()
self.update_metaclass(defn)
bases = defn.base_type_exprs
bases, tvar_defs, is_protocol = self.clean_up_bases_and_infer_type_variables(defn, bases,
context=defn)
for tvd in tvar_defs:
if any(has_placeholder(t) for t in [tvd.upper_bound] + tvd.values):
# Some type variable bounds or values are not ready, we need
# to re-analyze this class.
self.defer()
self.analyze_class_keywords(defn)
result = self.analyze_base_classes(bases)
if result is None or self.found_incomplete_ref(tag):
# Something was incomplete. Defer current target.
self.mark_incomplete(defn.name, defn)
return
base_types, base_error = result
if any(isinstance(base, PlaceholderType) for base, _ in base_types):
# We need to know the TypeInfo of each base to construct the MRO. Placeholder types
# are okay in nested positions, since they can't affect the MRO.
self.mark_incomplete(defn.name, defn)
return
is_typeddict, info = self.typed_dict_analyzer.analyze_typeddict_classdef(defn)
if is_typeddict:
if info is None:
self.mark_incomplete(defn.name, defn)
else:
self.prepare_class_def(defn, info)
return
if self.analyze_namedtuple_classdef(defn):
return
self.prepare_class_def(defn)
defn.type_vars = tvar_defs
defn.info.type_vars = [tvar.name for tvar in tvar_defs]
if base_error:
defn.info.fallback_to_any = True
with self.scope.class_scope(defn.info):
self.configure_base_classes(defn, base_types)
defn.info.is_protocol = is_protocol
self.analyze_metaclass(defn)
defn.info.runtime_protocol = False
for decorator in defn.decorators:
self.analyze_class_decorator(defn, decorator)
self.analyze_class_body_common(defn)
def is_core_builtin_class(self, defn: ClassDef) -> bool:
return self.cur_mod_id == 'builtins' and defn.name in CORE_BUILTIN_CLASSES
def analyze_class_body_common(self, defn: ClassDef) -> None:
self.enter_class(defn.info)
defn.defs.accept(self)
self.apply_class_plugin_hooks(defn)
self.leave_class()
def analyze_namedtuple_classdef(self, defn: ClassDef) -> bool:
if defn.info and defn.info.is_named_tuple:
            # Don't reprocess everything. We just need to process methods defined
            # in the named tuple class body.
is_named_tuple, info = True, defn.info # type: bool, Optional[TypeInfo]
else:
is_named_tuple, info = self.named_tuple_analyzer.analyze_namedtuple_classdef(defn)
if is_named_tuple:
if info is None:
self.mark_incomplete(defn.name, defn)
else:
self.prepare_class_def(defn, info)
with self.scope.class_scope(defn.info):
with self.named_tuple_analyzer.save_namedtuple_body(info):
self.analyze_class_body_common(defn)
return True
return False
def apply_class_plugin_hooks(self, defn: ClassDef) -> None:
def get_fullname(expr: Expression) -> Optional[str]:
if isinstance(expr, CallExpr):
return get_fullname(expr.callee)
elif isinstance(expr, IndexExpr):
return get_fullname(expr.base)
elif isinstance(expr, RefExpr):
if expr.fullname:
return expr.fullname
                # If we don't have a fullname look it up. This happens because base classes
                # are analyzed in a different manner (see exprtotype.py) and therefore those
                # AST nodes will not have full names.
sym = self.lookup_type_node(expr)
if sym:
return sym.fullname
return None
for decorator in defn.decorators:
decorator_name = get_fullname(decorator)
if decorator_name:
hook = self.plugin.get_class_decorator_hook(decorator_name)
if hook:
hook(ClassDefContext(defn, decorator, self))
if defn.metaclass:
metaclass_name = get_fullname(defn.metaclass)
if metaclass_name:
hook = self.plugin.get_metaclass_hook(metaclass_name)
if hook:
hook(ClassDefContext(defn, defn.metaclass, self))
for base_expr in defn.base_type_exprs:
base_name = get_fullname(base_expr)
if base_name:
hook = self.plugin.get_base_class_hook(base_name)
if hook:
hook(ClassDefContext(defn, base_expr, self))
def analyze_class_keywords(self, defn: ClassDef) -> None:
for value in defn.keywords.values():
value.accept(self)
def enter_class(self, info: TypeInfo) -> None:
self.type_stack.append(self.type)
self.locals.append(None)
self.is_comprehension_stack.append(False)
self.block_depth.append(-1)
self.type = info
def leave_class(self) -> None:
self.block_depth.pop()
self.locals.pop()
self.is_comprehension_stack.pop()
self.type = self.type_stack.pop()
def analyze_class_decorator(self, defn: ClassDef, decorator: Expression) -> None:
decorator.accept(self)
if isinstance(decorator, RefExpr):
if decorator.fullname in RUNTIME_PROTOCOL_DECOS:
if defn.info.is_protocol:
defn.info.runtime_protocol = True
else:
self.fail('@runtime_checkable can only be used with protocol classes',
defn)
elif decorator.fullname in ('typing.final',
'typing_extensions.final'):
defn.info.is_final = True
def clean_up_bases_and_infer_type_variables(
self,
defn: ClassDef,
base_type_exprs: List[Expression],
context: Context) -> Tuple[List[Expression],
List[TypeVarDef],
bool]:
removed = []
declared_tvars = []
is_protocol = False
for i, base_expr in enumerate(base_type_exprs):
self.analyze_type_expr(base_expr)
try:
base = expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
continue
result = self.analyze_class_typevar_declaration(base)
if result is not None:
if declared_tvars:
self.fail('Only single Generic[...] or Protocol[...] can be in bases', context)
removed.append(i)
tvars = result[0]
is_protocol |= result[1]
declared_tvars.extend(tvars)
if isinstance(base, UnboundType):
sym = self.lookup_qualified(base.name, base)
if sym is not None and sym.node is not None:
if (sym.node.fullname() in ('typing.Protocol',
'typing_extensions.Protocol') and
i not in removed):
removed.append(i)
is_protocol = True
all_tvars = self.get_all_bases_tvars(base_type_exprs, removed)
if declared_tvars:
if len(remove_dups(declared_tvars)) < len(declared_tvars):
self.fail("Duplicate type variables in Generic[...] or Protocol[...]", context)
declared_tvars = remove_dups(declared_tvars)
if not set(all_tvars).issubset(set(declared_tvars)):
self.fail("If Generic[...] or Protocol[...] is present"
" it should list all type variables", context)
declared_tvars = remove_dups(declared_tvars + all_tvars)
else:
declared_tvars = all_tvars
for i in reversed(removed):
defn.removed_base_type_exprs.append(defn.base_type_exprs[i])
del base_type_exprs[i]
tvar_defs = []
for name, tvar_expr in declared_tvars:
tvar_def = self.tvar_scope.bind_new(name, tvar_expr)
tvar_defs.append(tvar_def)
return base_type_exprs, tvar_defs, is_protocol
def analyze_class_typevar_declaration(self, base: Type) -> Optional[Tuple[TypeVarList, bool]]:
if not isinstance(base, UnboundType):
return None
unbound = base
sym = self.lookup_qualified(unbound.name, unbound)
if sym is None or sym.node is None:
return None
if (sym.node.fullname() == 'typing.Generic' or
sym.node.fullname() == 'typing.Protocol' and base.args or
sym.node.fullname() == 'typing_extensions.Protocol' and base.args):
is_proto = sym.node.fullname() != 'typing.Generic'
tvars = []
for arg in unbound.args:
tag = self.track_incomplete_refs()
tvar = self.analyze_unbound_tvar(arg)
if tvar:
tvars.append(tvar)
elif not self.found_incomplete_ref(tag):
self.fail('Free type variable expected in %s[...]' %
sym.node.name(), base)
return tvars, is_proto
return None
def analyze_unbound_tvar(self, t: Type) -> Optional[Tuple[str, TypeVarExpr]]:
if not isinstance(t, UnboundType):
return None
unbound = t
sym = self.lookup_qualified(unbound.name, unbound)
if sym and isinstance(sym.node, PlaceholderNode):
self.record_incomplete_ref()
if sym is None or not isinstance(sym.node, TypeVarExpr):
return None
elif sym.fullname and not self.tvar_scope.allow_binding(sym.fullname):
return None
else:
assert isinstance(sym.node, TypeVarExpr)
return unbound.name, sym.node
def get_all_bases_tvars(self,
base_type_exprs: List[Expression],
removed: List[int]) -> TypeVarList:
tvars = [] # type: TypeVarList
for i, base_expr in enumerate(base_type_exprs):
if i not in removed:
try:
base = expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
# This error will be caught later.
continue
base_tvars = base.accept(TypeVariableQuery(self.lookup_qualified, self.tvar_scope))
tvars.extend(base_tvars)
return remove_dups(tvars)
def prepare_class_def(self, defn: ClassDef, info: Optional[TypeInfo] = None) -> None:
if not defn.info:
defn.fullname = self.qualified_name(defn.name)
# TODO: Nested classes
info = info or self.make_empty_type_info(defn)
defn.info = info
info.defn = defn
if not self.is_func_scope():
info._fullname = self.qualified_name(defn.name)
else:
info._fullname = info.name()
self.add_symbol(defn.name, defn.info, defn)
if self.is_nested_within_func_scope():
            # We need to preserve local classes, let's store them
            # in globals under mangled unique names
if '@' not in defn.info._fullname:
local_name = defn.info._fullname + '@' + str(defn.line)
if defn.info.is_named_tuple:
defn.info._fullname += '@' + str(defn.line)
else:
defn.info._fullname = self.cur_mod_id + '.' + local_name
else:
local_name = defn.info._fullname
defn.fullname = defn.info._fullname
self.globals[local_name] = SymbolTableNode(GDEF, defn.info)
def make_empty_type_info(self, defn: ClassDef) -> TypeInfo:
if (self.is_module_scope()
and self.cur_mod_id == 'builtins'
and defn.name in CORE_BUILTIN_CLASSES):
info = self.globals[defn.name].node
assert isinstance(info, TypeInfo)
else:
info = TypeInfo(SymbolTable(), defn, self.cur_mod_id)
info.set_line(defn)
return info
def get_name_repr_of_expr(self, expr: Expression) -> Optional[str]:
if isinstance(expr, NameExpr):
return expr.name
if isinstance(expr, MemberExpr):
return get_member_expr_fullname(expr)
if isinstance(expr, IndexExpr):
return self.get_name_repr_of_expr(expr.base)
if isinstance(expr, CallExpr):
return self.get_name_repr_of_expr(expr.callee)
return None
def analyze_base_classes(
self,
base_type_exprs: List[Expression]) -> Optional[Tuple[List[Tuple[ProperType,
Expression]],
bool]]:
is_error = False
bases = []
for base_expr in base_type_exprs:
if (isinstance(base_expr, RefExpr) and
base_expr.fullname in ('typing.NamedTuple',) + TPDICT_NAMES):
continue
try:
base = self.expr_to_analyzed_type(base_expr, allow_placeholder=True)
except TypeTranslationError:
name = self.get_name_repr_of_expr(base_expr)
if isinstance(base_expr, CallExpr):
msg = 'Unsupported dynamic base class'
else:
msg = 'Invalid base class'
if name:
msg += ' "{}"'.format(name)
self.fail(msg, base_expr)
is_error = True
continue
if base is None:
return None
base = get_proper_type(base)
bases.append((base, base_expr))
return bases, is_error
def configure_base_classes(self,
defn: ClassDef,
bases: List[Tuple[ProperType, Expression]]) -> None:
base_types = []
info = defn.info
info.tuple_type = None
for base, base_expr in bases:
if isinstance(base, TupleType):
actual_base = self.configure_tuple_base_class(defn, base, base_expr)
base_types.append(actual_base)
elif isinstance(base, Instance):
if base.type.is_newtype:
self.fail("Cannot subclass NewType", defn)
base_types.append(base)
elif isinstance(base, AnyType):
if self.options.disallow_subclassing_any:
if isinstance(base_expr, (NameExpr, MemberExpr)):
msg = "Class cannot subclass '{}' (has type 'Any')".format(base_expr.name)
else:
msg = "Class cannot subclass value of type 'Any'"
self.fail(msg, base_expr)
info.fallback_to_any = True
else:
msg = 'Invalid base class'
name = self.get_name_repr_of_expr(base_expr)
if name:
msg += ' "{}"'.format(name)
self.fail(msg, base_expr)
info.fallback_to_any = True
if self.options.disallow_any_unimported and has_any_from_unimported_type(base):
if isinstance(base_expr, (NameExpr, MemberExpr)):
prefix = "Base type {}".format(base_expr.name)
else:
prefix = "Base type"
self.msg.unimported_type_becomes_any(prefix, base, base_expr)
check_for_explicit_any(base, self.options, self.is_typeshed_stub_file, self.msg,
context=base_expr)
if not base_types and defn.fullname != 'builtins.object':
base_types.append(self.object_type())
info.bases = base_types
if not self.verify_base_classes(defn):
defn.info.mro = [defn.info, self.object_type().type]
return
self.calculate_class_mro(defn, self.object_type)
def configure_tuple_base_class(self,
defn: ClassDef,
base: TupleType,
base_expr: Expression) -> Instance:
info = defn.info
if info.tuple_type and info.tuple_type != base:
self.fail("Class has two incompatible bases derived from tuple", defn)
defn.has_incompatible_baseclass = True
info.tuple_type = base
if isinstance(base_expr, CallExpr):
defn.analyzed = NamedTupleExpr(base.partial_fallback.type)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
if base.partial_fallback.type.fullname() == 'builtins.tuple':
self.schedule_patch(PRIORITY_FALLBACKS, lambda: calculate_tuple_fallback(base))
return base.partial_fallback
def calculate_class_mro(self, defn: ClassDef,
obj_type: Optional[Callable[[], Instance]] = None) -> None:
try:
calculate_mro(defn.info, obj_type)
except MroError:
self.fail_blocker('Cannot determine consistent method resolution '
'order (MRO) for "%s"' % defn.name, defn)
defn.info.mro = []
if defn.fullname:
hook = self.plugin.get_customize_class_mro_hook(defn.fullname)
if hook:
hook(ClassDefContext(defn, FakeExpression(), self))
def update_metaclass(self, defn: ClassDef) -> None:
python2_meta_expr = None
if self.options.python_version[0] == 2:
for body_node in defn.defs.body:
if isinstance(body_node, ClassDef) and body_node.name == "__metaclass__":
self.fail("Metaclasses defined as inner classes are not supported", body_node)
break
elif isinstance(body_node, AssignmentStmt) and len(body_node.lvalues) == 1:
lvalue = body_node.lvalues[0]
if isinstance(lvalue, NameExpr) and lvalue.name == "__metaclass__":
python2_meta_expr = body_node.rvalue
with_meta_expr = None
if len(defn.base_type_exprs) == 1:
base_expr = defn.base_type_exprs[0]
if isinstance(base_expr, CallExpr) and isinstance(base_expr.callee, RefExpr):
base_expr.callee.accept(self)
if (base_expr.callee.fullname == 'six.with_metaclass'
and len(base_expr.args) >= 1
and all(kind == ARG_POS for kind in base_expr.arg_kinds)):
with_meta_expr = base_expr.args[0]
defn.base_type_exprs = base_expr.args[1:]
add_meta_expr = None
for dec_expr in defn.decorators:
if isinstance(dec_expr, CallExpr) and isinstance(dec_expr.callee, RefExpr):
dec_expr.callee.accept(self)
if (dec_expr.callee.fullname == 'six.add_metaclass'
and len(dec_expr.args) == 1
and dec_expr.arg_kinds[0] == ARG_POS):
add_meta_expr = dec_expr.args[0]
break
metas = {defn.metaclass, python2_meta_expr, with_meta_expr, add_meta_expr} - {None}
if len(metas) == 0:
return
if len(metas) > 1:
self.fail("Multiple metaclass definitions", defn)
return
defn.metaclass = metas.pop()
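    # update_metaclass folds three declaration styles into defn.metaclass
    # (Meta/Base below are illustrative names):
    #
    #     class A(six.with_metaclass(Meta, Base)): ...   # six helper base
    #
    #     @six.add_metaclass(Meta)
    #     class B(Base): ...                             # six decorator
    #
    #     class C(Base):                                 # checked only when
    #         __metaclass__ = Meta                       # targeting Python 2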
def verify_base_classes(self, defn: ClassDef) -> bool:
info = defn.info
for base in info.bases:
baseinfo = base.type
if self.is_base_class(info, baseinfo):
self.fail('Cycle in inheritance hierarchy', defn, blocker=True)
info.bases = []
if baseinfo.fullname() == 'builtins.bool':
self.fail("'%s' is not a valid base class" %
baseinfo.name(), defn, blocker=True)
return False
dup = find_duplicate(info.direct_base_classes())
if dup:
self.fail('Duplicate base class "%s"' % dup.name(), defn, blocker=True)
return False
return True
def is_base_class(self, t: TypeInfo, s: TypeInfo) -> bool:
worklist = [s]
visited = {s}
while worklist:
nxt = worklist.pop()
if nxt == t:
return True
for base in nxt.bases:
if base.type not in visited:
worklist.append(base.type)
visited.add(base.type)
return False
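    # is_base_class does an explicit graph walk instead of consulting the MRO,
    # because it runs while the hierarchy may still contain a cycle; `visited`
    # guarantees termination. With hypothetical TypeInfos A <- B <- C,
    # is_base_class(A, C) walks C, B, A and returns True, while
    # is_base_class(C, A) exhausts A's bases and returns False.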
def analyze_metaclass(self, defn: ClassDef) -> None:
if defn.metaclass:
metaclass_name = None
if isinstance(defn.metaclass, NameExpr):
metaclass_name = defn.metaclass.name
elif isinstance(defn.metaclass, MemberExpr):
metaclass_name = get_member_expr_fullname(defn.metaclass)
if metaclass_name is None:
self.fail("Dynamic metaclass not supported for '%s'" % defn.name, defn.metaclass)
return
sym = self.lookup_qualified(metaclass_name, defn.metaclass)
if sym is None:
return
if isinstance(sym.node, Var) and isinstance(get_proper_type(sym.node.type), AnyType):
return
if isinstance(sym.node, PlaceholderNode):
self.defer(defn)
return
if not isinstance(sym.node, TypeInfo) or sym.node.tuple_type is not None:
self.fail("Invalid metaclass '%s'" % metaclass_name, defn.metaclass)
return
if not sym.node.is_metaclass():
self.fail("Metaclasses not inheriting from 'type' are not supported",
defn.metaclass)
return
inst = fill_typevars(sym.node)
assert isinstance(inst, Instance)
defn.info.declared_metaclass = inst
defn.info.metaclass_type = defn.info.calculate_metaclass_type()
if any(info.is_protocol for info in defn.info.mro):
if (not defn.info.metaclass_type or
defn.info.metaclass_type.type.fullname() == 'builtins.type'):
abc_meta = self.named_type_or_none('abc.ABCMeta', [])
if abc_meta is not None:
defn.info.metaclass_type = abc_meta
if defn.info.metaclass_type is None:
if defn.metaclass is not None:
self.fail("Inconsistent metaclass structure for '%s'" % defn.name, defn)
else:
if defn.info.metaclass_type.type.has_base('enum.EnumMeta'):
defn.info.is_enum = True
if defn.type_vars:
self.fail("Enum class cannot be generic", defn)
#
# Imports
#
def visit_import(self, i: Import) -> None:
self.statement = i
for id, as_id in i.ids:
if as_id is not None:
self.add_module_symbol(id, as_id, module_public=True, context=i)
else:
# Modules imported in a stub file without using 'as x' won't get exported
module_public = (
not self.is_stub_file
and self.options.implicit_reexport
)
base = id.split('.')[0]
self.add_module_symbol(base, base, module_public=module_public,
context=i, module_hidden=not module_public)
def visit_import_from(self, imp: ImportFrom) -> None:
self.statement = imp
module_id = self.correct_relative_import(imp)
module = self.modules.get(module_id)
for id, as_id in imp.names:
fullname = module_id + '.' + id
if module is None:
node = None
elif module_id == self.cur_mod_id and fullname in self.modules:
node = SymbolTableNode(GDEF, self.modules[fullname])
else:
node = module.names.get(id)
missing_submodule = False
imported_id = as_id or id
# If the module does not contain a symbol with the name 'id',
# try checking if it's a module instead.
if not node:
mod = self.modules.get(fullname)
if mod is not None:
kind = self.current_symbol_kind()
node = SymbolTableNode(kind, mod)
elif fullname in self.missing_modules:
missing_submodule = True
if (module and not node and (module.is_stub or self.options.python_version >= (3, 7))
and '__getattr__' in module.names):
fullname = module_id + '.' + id
gvar = self.create_getattr_var(module.names['__getattr__'], imported_id, fullname)
if gvar:
self.add_symbol(imported_id, gvar, imp)
continue
if node and not node.module_hidden:
self.process_imported_symbol(node, module_id, id, as_id, fullname, imp)
elif module and not missing_submodule:
self.report_missing_module_attribute(module_id, id, imported_id, imp)
else:
self.add_unknown_imported_symbol(imported_id, imp, target_name=fullname)
def process_imported_symbol(self,
node: SymbolTableNode,
module_id: str,
id: str,
as_id: Optional[str],
fullname: str,
context: ImportBase) -> None:
imported_id = as_id or id
if isinstance(node.node, PlaceholderNode):
if self.final_iteration:
self.report_missing_module_attribute(module_id, id, imported_id, context)
return
else:
self.mark_incomplete(imported_id, node.node, becomes_typeinfo=True)
existing_symbol = self.globals.get(imported_id)
if (existing_symbol and not isinstance(existing_symbol.node, PlaceholderNode) and
not isinstance(node.node, PlaceholderNode)):
if self.process_import_over_existing_name(
imported_id, existing_symbol, node, context):
return
if existing_symbol and isinstance(node.node, PlaceholderNode):
return
module_public = (
not self.is_stub_file
and self.options.implicit_reexport
or as_id is not None
)
module_hidden = not module_public and fullname not in self.modules
self.add_imported_symbol(imported_id, node, context,
module_public=module_public,
module_hidden=module_hidden)
def report_missing_module_attribute(self, import_id: str, source_id: str, imported_id: str,
context: Node) -> None:
if self.is_incomplete_namespace(import_id):
            # We don't know whether the name will be there, since the namespace
            # is incomplete. Defer the current target.
self.mark_incomplete(imported_id, context)
return
message = "Module '{}' has no attribute '{}'".format(import_id, source_id)
# Suggest alternatives, if any match is found.
module = self.modules.get(import_id)
if module:
alternatives = set(module.names.keys()).difference({source_id})
matches = best_matches(source_id, alternatives)[:3]
if matches:
suggestion = "; maybe {}?".format(pretty_or(matches))
message += "{}".format(suggestion)
self.fail(message, context, code=codes.ATTR_DEFINED)
self.add_unknown_imported_symbol(imported_id, context)
if import_id == 'typing':
# The user probably has a missing definition in a test fixture. Let's verify.
fullname = 'builtins.{}'.format(source_id.lower())
if (self.lookup_fully_qualified_or_none(fullname) is None and
fullname in SUGGESTED_TEST_FIXTURES):
self.add_fixture_note(fullname, context)
def process_import_over_existing_name(self,
imported_id: str, existing_symbol: SymbolTableNode,
module_symbol: SymbolTableNode,
import_node: ImportBase) -> bool:
if existing_symbol.node is module_symbol.node:
return False
if (existing_symbol.kind in (LDEF, GDEF, MDEF) and
isinstance(existing_symbol.node, (Var, FuncDef, TypeInfo, Decorator, TypeAlias))):
lvalue = NameExpr(imported_id)
lvalue.kind = existing_symbol.kind
lvalue.node = existing_symbol.node
rvalue = NameExpr(imported_id)
rvalue.kind = module_symbol.kind
rvalue.node = module_symbol.node
if isinstance(rvalue.node, TypeAlias):
# Suppress bogus errors from the dummy assignment if rvalue is an alias.
# Otherwise mypy may complain that alias is invalid in runtime context.
rvalue.is_alias_rvalue = True
assignment = AssignmentStmt([lvalue], rvalue)
for node in assignment, lvalue, rvalue:
node.set_line(import_node)
import_node.assignments.append(assignment)
return True
return False
def add_fixture_note(self, fullname: str, ctx: Context) -> None:
self.note('Maybe your test fixture does not define "{}"?'.format(fullname), ctx)
if fullname in SUGGESTED_TEST_FIXTURES:
self.note(
'Consider adding [builtins fixtures/{}] to your test description'.format(
SUGGESTED_TEST_FIXTURES[fullname]), ctx)
def correct_relative_import(self, node: Union[ImportFrom, ImportAll]) -> str:
import_id, ok = correct_relative_import(self.cur_mod_id, node.relative, node.id,
self.cur_mod_node.is_package_init_file())
if not ok:
self.fail("Relative import climbs too many namespaces", node)
return import_id
def visit_import_all(self, i: ImportAll) -> None:
i_id = self.correct_relative_import(i)
if i_id in self.modules:
m = self.modules[i_id]
if self.is_incomplete_namespace(i_id):
# Any names could be missing from the current namespace if the target module
# namespace is incomplete.
self.mark_incomplete('*', i)
for name, node in m.names.items():
if node is None:
continue
# if '__all__' exists, all nodes not included have had module_public set to
# False, and we can skip checking '_' because it's been explicitly included.
if node.module_public and (not name.startswith('_') or '__all__' in m.names):
if isinstance(node.node, MypyFile):
self.imports.add(node.node.fullname())
existing_symbol = self.lookup_current_scope(name)
if existing_symbol and not isinstance(node.node, PlaceholderNode):
if self.process_import_over_existing_name(
name, existing_symbol, node, i):
continue
module_public = self.is_stub_file or self.options.implicit_reexport
self.add_imported_symbol(name, node, i,
module_public=module_public,
module_hidden=not module_public)
else:
# Don't add any dummy symbols for 'from x import *' if 'x' is unknown.
pass
#
# Assignment
#
def visit_assignment_expr(self, s: AssignmentExpr) -> None:
s.value.accept(self)
self.analyze_lvalue(s.target, escape_comprehensions=True)
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
self.statement = s
# Special case assignment like X = X.
if self.analyze_identity_global_assignment(s):
return
tag = self.track_incomplete_refs()
s.rvalue.accept(self)
if self.found_incomplete_ref(tag) or self.should_wait_rhs(s.rvalue):
# Initializer couldn't be fully analyzed. Defer the current statement and give up.
# Make sure that if we skip the definition of some local names, they can't be
# added later in this scope, since an earlier definition should take precedence.
for expr in names_modified_by_assignment(s):
self.mark_incomplete(expr.name, expr)
return
# The r.h.s. is now ready to be classified, first check if it is a special form:
special_form = False
# * type alias
if self.check_and_set_up_type_alias(s):
s.is_alias_def = True
special_form = True
# * type variable definition
elif self.process_typevar_declaration(s):
special_form = True
# * type constructors
elif self.analyze_namedtuple_assign(s):
special_form = True
elif self.analyze_typeddict_assign(s):
special_form = True
elif self.newtype_analyzer.process_newtype_declaration(s):
special_form = True
elif self.analyze_enum_assign(s):
special_form = True
if special_form:
self.record_special_form_lvalue(s)
return
# OK, this is a regular assignment, perform the necessary analysis steps.
s.is_final_def = self.unwrap_final(s)
self.analyze_lvalues(s)
self.check_final_implicit_def(s)
self.check_classvar(s)
self.process_type_annotation(s)
self.apply_dynamic_class_hook(s)
self.store_final_status(s)
if not s.type:
self.process_module_assignment(s.lvalues, s.rvalue, s)
self.process__all__(s)
def analyze_identity_global_assignment(self, s: AssignmentStmt) -> bool:
if not isinstance(s.rvalue, NameExpr) or len(s.lvalues) != 1:
# Not of form 'X = X'
return False
lvalue = s.lvalues[0]
if not isinstance(lvalue, NameExpr) or s.rvalue.name != lvalue.name:
# Not of form 'X = X'
return False
if self.type is not None or self.is_func_scope():
# Not in global scope
return False
# It's an assignment like 'X = X' in the global scope.
name = lvalue.name
sym = self.lookup(name, s)
if sym is None:
if self.final_iteration:
return False
else:
self.defer()
return True
else:
if sym.node is None:
return False
if name not in self.globals:
self.add_symbol(name, sym.node, s)
if not isinstance(sym.node, PlaceholderNode):
for node in s.rvalue, lvalue:
node.node = sym.node
node.kind = GDEF
node.fullname = sym.node.fullname()
return True
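# Example of the special form handled above (a sketch):
#     import sys
#     sys = sys  # 'X = X' at global scope
# The assignment rebinds the name to its existing definition; if the name
# can't be resolved yet, the statement is deferred rather than treated as a
# fresh assignment.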
def should_wait_rhs(self, rv: Expression) -> bool:
if self.final_iteration:
return False
if isinstance(rv, NameExpr):
n = self.lookup(rv.name, rv)
if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
return True
elif isinstance(rv, MemberExpr):
fname = get_member_expr_fullname(rv)
if fname:
n = self.lookup_qualified(fname, rv, suppress_errors=True)
if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
return True
elif isinstance(rv, IndexExpr) and isinstance(rv.base, RefExpr):
return self.should_wait_rhs(rv.base)
elif isinstance(rv, CallExpr) and isinstance(rv.callee, RefExpr):
return self.should_wait_rhs(rv.callee)
return False
def can_be_type_alias(self, rv: Expression) -> bool:
if isinstance(rv, RefExpr) and self.is_type_ref(rv, bare=True):
return True
if isinstance(rv, IndexExpr) and self.is_type_ref(rv.base, bare=False):
return True
if self.is_none_alias(rv):
return True
return False
def is_type_ref(self, rv: Expression, bare: bool = False) -> bool:
if not isinstance(rv, RefExpr):
return False
if isinstance(rv.node, TypeVarExpr):
self.fail('Type variable "{}" is invalid as target for type alias'.format(
rv.fullname), rv)
return False
if bare:
valid_refs = {'typing.Any', 'typing.Tuple', 'typing.Callable'}
else:
valid_refs = type_constructors
if isinstance(rv.node, TypeAlias) or rv.fullname in valid_refs:
return True
if isinstance(rv.node, TypeInfo):
if bare:
return True
return not rv.node.is_enum
if isinstance(rv, NameExpr):
n = self.lookup(rv.name, rv)
if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
return True
elif isinstance(rv, MemberExpr):
fname = get_member_expr_fullname(rv)
if fname:
n = self.lookup_qualified(fname, rv, suppress_errors=True)
if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
return True
return False
def is_none_alias(self, node: Expression) -> bool:
if isinstance(node, CallExpr):
if (isinstance(node.callee, NameExpr) and len(node.args) == 1 and
isinstance(node.args[0], NameExpr)):
call = self.lookup_qualified(node.callee.name, node.callee)
arg = self.lookup_qualified(node.args[0].name, node.args[0])
if (call is not None and call.node and call.node.fullname() == 'builtins.type' and
arg is not None and arg.node and arg.node.fullname() == 'builtins.None'):
return True
return False
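# Sketch of the pattern recognized above, the classic `type(None)` spelling:
#     NoneAlias = type(None)
# where both `type` and `None` resolve to builtins; the right-hand side is
# then treated as an alias for the None type.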
def record_special_form_lvalue(self, s: AssignmentStmt) -> None:
lvalue = s.lvalues[0]
assert isinstance(lvalue, NameExpr)
lvalue.is_special_form = True
if self.current_symbol_kind() == GDEF:
lvalue.fullname = self.qualified_name(lvalue.name)
lvalue.kind = self.current_symbol_kind()
def analyze_enum_assign(self, s: AssignmentStmt) -> bool:
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, EnumCallExpr):
return True
return self.enum_call_analyzer.process_enum_call(s, self.is_func_scope())
def analyze_namedtuple_assign(self, s: AssignmentStmt) -> bool:
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, NamedTupleExpr):
return True
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return False
lvalue = s.lvalues[0]
name = lvalue.name
is_named_tuple, info = self.named_tuple_analyzer.check_namedtuple(s.rvalue, name,
self.is_func_scope())
if not is_named_tuple:
return False
if not info:
self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
return True
def analyze_typeddict_assign(self, s: AssignmentStmt) -> bool:
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, TypedDictExpr):
return True # This is a valid and analyzed typed dict definition, nothing to do here.
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return False
lvalue = s.lvalues[0]
name = lvalue.name
is_typed_dict, info = self.typed_dict_analyzer.check_typeddict(s.rvalue, name,
self.is_func_scope())
if not is_typed_dict:
return False
# Yes, it's a valid typed dict, but defer if it is not ready.
if not info:
self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
return True
def analyze_lvalues(self, s: AssignmentStmt) -> None:
explicit = s.unanalyzed_type is not None
if self.is_final_type(s.unanalyzed_type):
assert isinstance(s.unanalyzed_type, UnboundType)
if not s.unanalyzed_type.args:
explicit = False
for lval in s.lvalues:
self.analyze_lvalue(lval,
explicit_type=explicit,
is_final=s.is_final_def)
def apply_dynamic_class_hook(self, s: AssignmentStmt) -> None:
if len(s.lvalues) > 1:
return
lval = s.lvalues[0]
if not isinstance(lval, NameExpr) or not isinstance(s.rvalue, CallExpr):
return
call = s.rvalue
if not isinstance(call.callee, RefExpr):
return
fname = call.callee.fullname
if fname:
hook = self.plugin.get_dynamic_class_hook(fname)
if hook:
hook(DynamicClassDefContext(call, lval.name, self))
def unwrap_final(self, s: AssignmentStmt) -> bool:
if not s.unanalyzed_type or not self.is_final_type(s.unanalyzed_type):
return False
assert isinstance(s.unanalyzed_type, UnboundType)
if len(s.unanalyzed_type.args) > 1:
self.fail("Final[...] takes at most one type argument", s.unanalyzed_type)
invalid_bare_final = False
if not s.unanalyzed_type.args:
s.type = None
if isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs:
invalid_bare_final = True
self.fail("Type in Final[...] can only be omitted if there is an initializer", s)
else:
s.type = s.unanalyzed_type.args[0]
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], RefExpr):
self.fail("Invalid final declaration", s)
return False
lval = s.lvalues[0]
assert isinstance(lval, RefExpr)
if lval.is_new_def:
lval.is_inferred_def = s.type is None
if self.loop_depth > 0:
self.fail("Cannot use Final inside a loop", s)
if self.type and self.type.is_protocol:
self.msg.protocol_members_cant_be_final(s)
if (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs and
not self.is_stub_file and not self.is_class_scope()):
if not invalid_bare_final:
self.msg.final_without_value(s)
return True
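# Hedged examples of the Final forms distinguished above:
#     x: Final = 1        # bare Final; the type is taken from the initializer
#     y: Final[int] = 2   # Final with an explicit type argument
#     z: Final            # error: bare Final requires an initializer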
def check_final_implicit_def(self, s: AssignmentStmt) -> None:
if not s.is_final_def:
return
lval = s.lvalues[0]
assert isinstance(lval, RefExpr)
if isinstance(lval, MemberExpr):
if not self.is_self_member_ref(lval):
self.fail("Final can be only applied to a name or an attribute on self", s)
s.is_final_def = False
return
else:
assert self.function_stack
if self.function_stack[-1].name() != '__init__':
self.fail("Can only declare a final attribute in class body or __init__", s)
s.is_final_def = False
return
def store_final_status(self, s: AssignmentStmt) -> None:
if s.is_final_def:
if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
node = s.lvalues[0].node
if isinstance(node, Var):
node.is_final = True
node.final_value = self.unbox_literal(s.rvalue)
if (self.is_class_scope() and
(isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
node.final_unset_in_class = True
else:
for lval in self.flatten_lvalues(s.lvalues):
if isinstance(lval, MemberExpr) and self.is_self_member_ref(lval):
assert self.type, "Self member outside a class"
cur_node = self.type.names.get(lval.name, None)
if cur_node and isinstance(cur_node.node, Var) and cur_node.node.is_final:
assert self.function_stack
top_function = self.function_stack[-1]
if (top_function.name() == '__init__' and
cur_node.node.final_unset_in_class and
not cur_node.node.final_set_in_init and
not (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
cur_node.node.final_set_in_init = True
s.is_final_def = True
def flatten_lvalues(self, lvalues: List[Expression]) -> List[Expression]:
res = []
for lv in lvalues:
if isinstance(lv, (TupleExpr, ListExpr)):
res.extend(self.flatten_lvalues(lv.items))
else:
res.append(lv)
return res
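# Illustrative behavior (a sketch): for `a, (b, c) = ...` the lvalues
# flatten to [a, b, c]; nested tuple/list targets are unwrapped and the
# left-to-right order is preserved.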
def unbox_literal(self, e: Expression) -> Optional[Union[int, float, bool, str]]:
if isinstance(e, (IntExpr, FloatExpr, StrExpr)):
return e.value
elif isinstance(e, NameExpr) and e.name in ('True', 'False'):
return True if e.name == 'True' else False
return None
def process_type_annotation(self, s: AssignmentStmt) -> None:
if s.type:
lvalue = s.lvalues[-1]
allow_tuple_literal = isinstance(lvalue, TupleExpr)
analyzed = self.anal_type(s.type, allow_tuple_literal=allow_tuple_literal)
if analyzed is None or has_placeholder(analyzed):
return
s.type = analyzed
if (self.type and self.type.is_protocol and isinstance(lvalue, NameExpr) and
isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs):
if isinstance(lvalue.node, Var):
lvalue.node.is_abstract_var = True
else:
if (any(isinstance(lv, NameExpr) and lv.is_inferred_def for lv in s.lvalues) and
self.type and self.type.is_protocol and not self.is_func_scope()):
self.fail('All protocol members must have explicitly declared types', s)
# Set the type if the rvalue is a simple literal (even if the above error occurred).
if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
if s.lvalues[0].is_inferred_def:
s.type = self.analyze_simple_literal_type(s.rvalue, s.is_final_def)
if s.type:
# Store type into nodes.
for lvalue in s.lvalues:
self.store_declared_types(lvalue, s.type)
def analyze_simple_literal_type(self, rvalue: Expression, is_final: bool) -> Optional[Type]:
if self.options.semantic_analysis_only or self.function_stack:
# Skip this if we're only doing the semantic analysis pass, and also
# inside functions, to avoid confusing the handling of dead code.
return None
if isinstance(rvalue, FloatExpr):
return self.named_type_or_none('builtins.float')
value = None
type_name = None
if isinstance(rvalue, IntExpr):
value, type_name = rvalue.value, 'builtins.int'
if isinstance(rvalue, StrExpr):
value, type_name = rvalue.value, 'builtins.str'
if isinstance(rvalue, BytesExpr):
value, type_name = rvalue.value, 'builtins.bytes'
if isinstance(rvalue, UnicodeExpr):
value, type_name = rvalue.value, 'builtins.unicode'
if type_name is not None:
assert value is not None
typ = self.named_type_or_none(type_name)
if typ and is_final:
return typ.copy_modified(last_known_value=LiteralType(
value=value,
fallback=typ,
line=typ.line,
column=typ.column,
))
return typ
return None
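# Illustrative sketch of the inference above:
#     x = 1          # plain assignment: inferred as int
#     y: Final = 1   # final: int with last_known_value Literal[1]
# Floats never carry a literal value, so a FloatExpr always yields plain
# 'builtins.float'.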
def analyze_alias(self, rvalue: Expression,
allow_placeholder: bool = False) -> Tuple[Optional[Type], List[str],
Set[str], List[str]]:
dynamic = bool(self.function_stack and self.function_stack[-1].is_dynamic())
global_scope = not self.type and not self.function_stack
res = analyze_type_alias(rvalue,
self,
self.tvar_scope,
self.plugin,
self.options,
self.is_typeshed_stub_file,
allow_unnormalized=self.is_stub_file,
allow_placeholder=allow_placeholder,
in_dynamic_func=dynamic,
global_scope=global_scope)
typ = None
if res:
typ, depends_on = res
found_type_vars = typ.accept(TypeVariableQuery(self.lookup_qualified, self.tvar_scope))
alias_tvars = [name for (name, node) in found_type_vars]
qualified_tvars = [node.fullname() for (name, node) in found_type_vars]
else:
alias_tvars = []
depends_on = set()
qualified_tvars = []
return typ, alias_tvars, depends_on, qualified_tvars
def check_and_set_up_type_alias(self, s: AssignmentStmt) -> bool:
lvalue = s.lvalues[0]
if len(s.lvalues) > 1 or not isinstance(lvalue, NameExpr):
return False
if s.unanalyzed_type is not None:
return False
existing = self.current_symbol_table().get(lvalue.name)
# Type aliases can't be re-defined. For example:
#     A: Type[float] = int
#     A = float  # OK, but this doesn't define an alias
#     B = int
#     B = float  # Error!
if (existing
and (isinstance(existing.node, Var) # existing variable
or (isinstance(existing.node, TypeAlias)
and not s.is_alias_def) # existing alias
or (isinstance(existing.node, PlaceholderNode)
and existing.node.node.line < s.line))): # previous incomplete definition
# TODO: find a more robust way to track the order of definitions.
# Note: if is_alias_def=True, this is just a node from previous iteration.
if isinstance(existing.node, TypeAlias) and not s.is_alias_def:
self.fail('Cannot assign multiple types to name "{}"'
' without an explicit "Type[...]" annotation'
.format(lvalue.name), lvalue)
return False
non_global_scope = self.type or self.is_func_scope()
if isinstance(s.rvalue, RefExpr) and non_global_scope:
# Fourth rule (special case): Non-subscripted right hand side creates a variable
# at class and function scopes. For example:
#
# class Model:
# ...
# class C:
# model = Model # this is automatically a variable with type 'Type[Model]'
#
# without this rule, this typical use case will require a lot of explicit
# annotations (see the second rule).
return False
rvalue = s.rvalue
if not self.can_be_type_alias(rvalue):
return False
if existing and not isinstance(existing.node, (PlaceholderNode, TypeAlias)):
# Cannot redefine existing node as type alias.
return False
res = None # type: Optional[Type]
if self.is_none_alias(rvalue):
res = NoneType()
alias_tvars, depends_on, qualified_tvars = \
[], set(), [] # type: List[str], Set[str], List[str]
else:
tag = self.track_incomplete_refs()
res, alias_tvars, depends_on, qualified_tvars = \
self.analyze_alias(rvalue, allow_placeholder=True)
if not res:
return False
# TODO: Maybe we only need to reject top-level placeholders, similar
# to base classes.
if self.found_incomplete_ref(tag) or has_placeholder(res):
# Since we have got here, we know this must be a type alias (incomplete refs
# may appear in nested positions), therefore use becomes_typeinfo=True.
self.mark_incomplete(lvalue.name, rvalue, becomes_typeinfo=True)
return True
self.add_type_alias_deps(depends_on)
# In addition to the aliases used, we add deps on unbound
# type variables, since they are erased from target type.
self.add_type_alias_deps(qualified_tvars)
# The above are only direct deps on other aliases.
# For subscripted aliases, type deps from expansion are added in deps.py
# (because the type is stored).
check_for_explicit_any(res, self.options, self.is_typeshed_stub_file, self.msg,
context=s)
# When this type alias gets "inlined", the Any is not explicit anymore,
# so we need to replace it with non-explicit Anys.
res = make_any_non_explicit(res)
no_args = isinstance(res, Instance) and not res.args # type: ignore
fix_instance_types(res, self.fail)
if isinstance(s.rvalue, (IndexExpr, CallExpr)): # CallExpr is for `void = type(None)`
s.rvalue.analyzed = TypeAliasExpr(res, alias_tvars, no_args)
s.rvalue.analyzed.line = s.line
# we use the column from resulting target, to get better location for errors
s.rvalue.analyzed.column = res.column
elif isinstance(s.rvalue, RefExpr):
s.rvalue.is_alias_rvalue = True
alias_node = TypeAlias(res, self.qualified_name(lvalue.name), s.line, s.column,
alias_tvars=alias_tvars, no_args=no_args)
if existing:
# An alias gets updated.
updated = False
if isinstance(existing.node, TypeAlias):
if existing.node.target != res:
# Copy expansion to the existing alias; this matches how we update base classes
# for a TypeInfo _in place_ if there are nested placeholders.
existing.node.target = res
existing.node.alias_tvars = alias_tvars
existing.node.no_args = no_args
updated = True
else:
# Otherwise just replace existing placeholder with type alias.
existing.node = alias_node
updated = True
if updated:
if self.final_iteration:
self.cannot_resolve_name(lvalue.name, 'name', s)
return True
else:
self.progress = True
# We need to defer so that this change can get propagated to base classes.
self.defer(s)
else:
self.add_symbol(lvalue.name, alias_node, s)
if isinstance(rvalue, RefExpr) and isinstance(rvalue.node, TypeAlias):
alias_node.normalized = rvalue.node.normalized
return True
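# Hedged examples of how the method above classifies assignments (a sketch):
#     Alias = Dict[str, int]   # subscripted type reference -> type alias
#     Alias2 = SomeClass       # bare class reference -> alias at module scope,
#                              # but a plain variable in class/function scope
#     A: Type[float] = int     # explicit annotation -> regular assignment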
def analyze_lvalue(self,
lval: Lvalue,
nested: bool = False,
explicit_type: bool = False,
is_final: bool = False,
escape_comprehensions: bool = False) -> None:
if escape_comprehensions:
assert isinstance(lval, NameExpr), "assignment expression target must be NameExpr"
if isinstance(lval, NameExpr):
self.analyze_name_lvalue(lval, explicit_type, is_final, escape_comprehensions)
elif isinstance(lval, MemberExpr):
self.analyze_member_lvalue(lval, explicit_type, is_final)
if explicit_type and not self.is_self_member_ref(lval):
self.fail('Type cannot be declared in assignment to non-self '
'attribute', lval)
elif isinstance(lval, IndexExpr):
if explicit_type:
self.fail('Unexpected type declaration', lval)
lval.accept(self)
elif isinstance(lval, TupleExpr):
items = lval.items
if len(items) == 0 and isinstance(lval, TupleExpr):
self.fail("can't assign to ()", lval)
self.analyze_tuple_or_list_lvalue(lval, explicit_type)
elif isinstance(lval, StarExpr):
if nested:
self.analyze_lvalue(lval.expr, nested, explicit_type)
else:
self.fail('Starred assignment target must be in a list or tuple', lval)
else:
self.fail('Invalid assignment target', lval)
def analyze_name_lvalue(self,
lvalue: NameExpr,
explicit_type: bool,
is_final: bool,
escape_comprehensions: bool) -> None:
if lvalue.node:
return
name = lvalue.name
if self.is_alias_for_final_name(name):
if is_final:
self.fail("Cannot redefine an existing name as final", lvalue)
else:
self.msg.cant_assign_to_final(name, self.type is not None, lvalue)
kind = self.current_symbol_kind()
names = self.current_symbol_table()
existing = names.get(name)
outer = self.is_global_or_nonlocal(name)
if (not existing or isinstance(existing.node, PlaceholderNode)) and not outer:
var = self.make_name_lvalue_var(lvalue, kind, not explicit_type)
added = self.add_symbol(name, var, lvalue, escape_comprehensions=escape_comprehensions)
if added:
lvalue.is_new_def = True
lvalue.is_inferred_def = True
lvalue.kind = kind
lvalue.node = var
if kind == GDEF:
lvalue.fullname = var._fullname
else:
lvalue.fullname = lvalue.name
if self.is_func_scope():
if unmangle(name) == '_':
typ = AnyType(TypeOfAny.special_form)
self.store_declared_types(lvalue, typ)
if is_final and self.is_final_redefinition(kind, name):
self.fail("Cannot redefine an existing name as final", lvalue)
else:
self.make_name_lvalue_point_to_existing_def(lvalue, explicit_type, is_final)
def is_final_redefinition(self, kind: int, name: str) -> bool:
if kind == GDEF:
return self.is_mangled_global(name) and not self.is_initial_mangled_global(name)
elif kind == MDEF and self.type:
return unmangle(name) + "'" in self.type.names
return False
def is_alias_for_final_name(self, name: str) -> bool:
if self.is_func_scope():
if not name.endswith("'"):
return False
name = unmangle(name)
assert self.locals[-1] is not None, "No locals at function scope"
existing = self.locals[-1].get(name)
return existing is not None and is_final_node(existing.node)
elif self.type is not None:
orig_name = unmangle(name) + "'"
if name == orig_name:
return False
existing = self.type.names.get(orig_name)
return existing is not None and is_final_node(existing.node)
else:
orig_name = unmangle(name) + "'"
if name == orig_name:
return False
existing = self.globals.get(orig_name)
return existing is not None and is_final_node(existing.node)
def make_name_lvalue_var(self, lvalue: NameExpr, kind: int, inferred: bool) -> Var:
v = Var(lvalue.name)
v.set_line(lvalue)
v.is_inferred = inferred
if kind == MDEF:
assert self.type is not None
v.info = self.type
v.is_initialized_in_class = True
if kind != LDEF:
v._fullname = self.qualified_name(lvalue.name)
else:
# fullname should never stay None
v._fullname = lvalue.name
v.is_ready = False # Type not inferred yet
return v
def make_name_lvalue_point_to_existing_def(
self,
lval: NameExpr,
explicit_type: bool,
is_final: bool) -> None:
if is_final:
# Redefining an existing name with final is always an error.
self.fail("Cannot redefine an existing name as final", lval)
original_def = self.lookup(lval.name, lval, suppress_errors=True)
if original_def is None and self.type and not self.is_func_scope():
# Workaround to allow "x, x = ..." in class body.
original_def = self.type.get(lval.name)
if explicit_type:
# Don't re-bind if there is a type annotation.
self.name_already_defined(lval.name, lval, original_def)
else:
if original_def:
self.bind_name_expr(lval, original_def)
else:
self.name_not_defined(lval.name, lval)
self.check_lvalue_validity(lval.node, lval)
def analyze_tuple_or_list_lvalue(self, lval: TupleExpr,
explicit_type: bool = False) -> None:
items = lval.items
star_exprs = [item for item in items if isinstance(item, StarExpr)]
if len(star_exprs) > 1:
self.fail('Two starred expressions in assignment', lval)
else:
if len(star_exprs) == 1:
star_exprs[0].valid = True
for i in items:
self.analyze_lvalue(i, nested=True, explicit_type=explicit_type)
def analyze_member_lvalue(self, lval: MemberExpr, explicit_type: bool, is_final: bool) -> None:
if lval.node:
return
lval.accept(self)
if self.is_self_member_ref(lval):
assert self.type, "Self member outside a class"
cur_node = self.type.names.get(lval.name)
node = self.type.get(lval.name)
if cur_node and is_final:
self.fail("Cannot redefine an existing name as final", lval)
if (not lval.node and cur_node and isinstance(cur_node.node, Var) and
cur_node.node.is_inferred and explicit_type):
self.attribute_already_defined(lval.name, lval, cur_node)
# If the attribute of self is not defined in superclasses, create a new Var, ...
if (node is None
or (isinstance(node.node, Var) and node.node.is_abstract_var)
# ... an explicit declaration on self also creates a new Var.
# Note that `explicit_type` might have been erased for a bare `Final`,
# so we also check if `is_final` is passed.
or (cur_node is None and (explicit_type or is_final))):
if self.type.is_protocol and node is None:
self.fail("Protocol members cannot be defined via assignment to self", lval)
else:
# Implicit attribute definition in __init__.
lval.is_new_def = True
lval.is_inferred_def = True
v = Var(lval.name)
v.set_line(lval)
v._fullname = self.qualified_name(lval.name)
v.info = self.type
v.is_ready = False
v.explicit_self_type = explicit_type or is_final
lval.def_var = v
lval.node = v
# TODO: should we also set lval.kind = MDEF?
self.type.names[lval.name] = SymbolTableNode(MDEF, v, implicit=True)
self.check_lvalue_validity(lval.node, lval)
def is_self_member_ref(self, memberexpr: MemberExpr) -> bool:
if not isinstance(memberexpr.expr, NameExpr):
return False
node = memberexpr.expr.node
return isinstance(node, Var) and node.is_self
def check_lvalue_validity(self, node: Union[Expression, SymbolNode, None],
ctx: Context) -> None:
if isinstance(node, TypeVarExpr):
self.fail('Invalid assignment target', ctx)
elif isinstance(node, TypeInfo):
self.fail(message_registry.CANNOT_ASSIGN_TO_TYPE, ctx)
def store_declared_types(self, lvalue: Lvalue, typ: Type) -> None:
if isinstance(typ, StarType) and not isinstance(lvalue, StarExpr):
self.fail('Star type only allowed for starred expressions', lvalue)
if isinstance(lvalue, RefExpr):
lvalue.is_inferred_def = False
if isinstance(lvalue.node, Var):
var = lvalue.node
var.type = typ
var.is_ready = True
# If node is not a variable, we'll catch it elsewhere.
elif isinstance(lvalue, TupleExpr):
typ = get_proper_type(typ)
if isinstance(typ, TupleType):
if len(lvalue.items) != len(typ.items):
self.fail('Incompatible number of tuple items', lvalue)
return
for item, itemtype in zip(lvalue.items, typ.items):
self.store_declared_types(item, itemtype)
else:
self.fail('Tuple type expected for multiple variables',
lvalue)
elif isinstance(lvalue, StarExpr):
if isinstance(typ, StarType):
self.store_declared_types(lvalue.expr, typ.type)
else:
self.store_declared_types(lvalue.expr, typ)
else:
# Any other target has been flagged as an error elsewhere, so just ignore it here.
pass
def process_typevar_declaration(self, s: AssignmentStmt) -> bool:
call = self.get_typevar_declaration(s)
if not call:
return False
lvalue = s.lvalues[0]
assert isinstance(lvalue, NameExpr)
if s.type:
self.fail("Cannot declare the type of a type variable", s)
return False
name = lvalue.name
if not self.check_typevar_name(call, name, s):
return False
n_values = call.arg_kinds[1:].count(ARG_POS)
values = self.analyze_value_types(call.args[1:1 + n_values])
res = self.process_typevar_parameters(call.args[1 + n_values:],
call.arg_names[1 + n_values:],
call.arg_kinds[1 + n_values:],
n_values,
s)
if res is None:
return False
variance, upper_bound = res
existing = self.current_symbol_table().get(name)
if existing and not (isinstance(existing.node, PlaceholderNode) or
(isinstance(existing.node, TypeVarExpr) and
existing.node is call.analyzed)):
self.fail("Cannot redefine '%s' as a type variable" % name, s)
return False
if self.options.disallow_any_unimported:
for idx, constraint in enumerate(values, start=1):
if has_any_from_unimported_type(constraint):
prefix = "Constraint {}".format(idx)
self.msg.unimported_type_becomes_any(prefix, constraint, s)
if has_any_from_unimported_type(upper_bound):
prefix = "Upper bound of type variable"
self.msg.unimported_type_becomes_any(prefix, upper_bound, s)
for t in values + [upper_bound]:
check_for_explicit_any(t, self.options, self.is_typeshed_stub_file, self.msg,
context=s)
if values and self.options.mypyc:
upper_bound = AnyType(TypeOfAny.implementation_artifact)
if not call.analyzed:
type_var = TypeVarExpr(name, self.qualified_name(name),
values, upper_bound, variance)
type_var.line = call.line
call.analyzed = type_var
else:
assert isinstance(call.analyzed, TypeVarExpr)
if call.analyzed.values != values or call.analyzed.upper_bound != upper_bound:
self.progress = True
call.analyzed.upper_bound = upper_bound
call.analyzed.values = values
self.add_symbol(name, call.analyzed, s)
return True
def check_typevar_name(self, call: CallExpr, name: str, context: Context) -> bool:
name = unmangle(name)
if len(call.args) < 1:
self.fail("Too few arguments for TypeVar()", context)
return False
if (not isinstance(call.args[0], (StrExpr, BytesExpr, UnicodeExpr))
or not call.arg_kinds[0] == ARG_POS):
self.fail("TypeVar() expects a string literal as first argument", context)
return False
elif call.args[0].value != name:
msg = "String argument 1 '{}' to TypeVar(...) does not match variable name '{}'"
self.fail(msg.format(call.args[0].value, name), context)
return False
return True
def get_typevar_declaration(self, s: AssignmentStmt) -> Optional[CallExpr]:
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return None
if not isinstance(s.rvalue, CallExpr):
return None
call = s.rvalue
callee = call.callee
if not isinstance(callee, RefExpr):
return None
if callee.fullname != 'typing.TypeVar':
return None
return call
def process_typevar_parameters(self, args: List[Expression],
names: List[Optional[str]],
kinds: List[int],
num_values: int,
context: Context) -> Optional[Tuple[int, Type]]:
has_values = (num_values > 0)
covariant = False
contravariant = False
upper_bound = self.object_type() # type: Type
for param_value, param_name, param_kind in zip(args, names, kinds):
if not param_kind == ARG_NAMED:
self.fail("Unexpected argument to TypeVar()", context)
return None
if param_name == 'covariant':
if isinstance(param_value, NameExpr):
if param_value.name == 'True':
covariant = True
else:
self.fail("TypeVar 'covariant' may only be 'True'", context)
return None
else:
self.fail("TypeVar 'covariant' may only be 'True'", context)
return None
elif param_name == 'contravariant':
if isinstance(param_value, NameExpr):
if param_value.name == 'True':
contravariant = True
else:
self.fail("TypeVar 'contravariant' may only be 'True'", context)
return None
else:
self.fail("TypeVar 'contravariant' may only be 'True'", context)
return None
elif param_name == 'bound':
if has_values:
self.fail("TypeVar cannot have both values and an upper bound", context)
return None
try:
# We want to use our custom error message below, so we suppress
# the default error message for invalid types here.
analyzed = self.expr_to_analyzed_type(param_value,
allow_placeholder=True,
report_invalid_types=False)
if analyzed is None:
# Type variables are special: we need to place them in the symbol table
# soon, even if upper bound is not ready yet. Otherwise avoiding
# a "deadlock" in this common pattern would be tricky:
# T = TypeVar('T', bound=Custom[Any])
# class Custom(Generic[T]):
# ...
analyzed = PlaceholderType(None, [], context.line)
upper_bound = get_proper_type(analyzed)
if isinstance(upper_bound, AnyType) and upper_bound.is_from_error:
self.fail("TypeVar 'bound' must be a type", param_value)
# Note: we do not return 'None' here -- we want to continue
# using the AnyType as the upper bound.
except TypeTranslationError:
self.fail("TypeVar 'bound' must be a type", param_value)
return None
elif param_name == 'values':
# Probably using obsolete syntax with values=(...). Explain the current syntax.
self.fail("TypeVar 'values' argument not supported", context)
self.fail("Use TypeVar('T', t, ...) instead of TypeVar('T', values=(t, ...))",
context)
return None
else:
self.fail("Unexpected argument to TypeVar(): {}".format(param_name), context)
return None
if covariant and contravariant:
self.fail("TypeVar cannot be both covariant and contravariant", context)
return None
elif num_values == 1:
self.fail("TypeVar cannot have only a single constraint", context)
return None
elif covariant:
variance = COVARIANT
elif contravariant:
variance = CONTRAVARIANT
else:
variance = INVARIANT
return variance, upper_bound
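# Hedged examples of TypeVar forms handled by the processing above:
#     T = TypeVar('T')                        # invariant, upper bound object
#     T_co = TypeVar('T_co', covariant=True)  # covariant
#     S = TypeVar('S', bound=Sized)           # explicit upper bound
#     U = TypeVar('U', int, str)              # value restriction (no bound)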
def basic_new_typeinfo(self, name: str, basetype_or_fallback: Instance) -> TypeInfo:
class_def = ClassDef(name, Block([]))
if self.is_func_scope() and not self.type:
# Full names of generated classes should always be prefixed with the module names
# even if they are nested in a function, since these classes will be (de-)serialized.
# (Note that the caller should append @line to the name to avoid collisions.)
# TODO: clean this up, see #6422.
class_def.fullname = self.cur_mod_id + '.' + self.qualified_name(name)
else:
class_def.fullname = self.qualified_name(name)
info = TypeInfo(SymbolTable(), class_def, self.cur_mod_id)
class_def.info = info
mro = basetype_or_fallback.type.mro
if not mro:
# Forward reference, MRO should be recalculated in third pass.
mro = [basetype_or_fallback.type, self.object_type().type]
info.mro = [info] + mro
info.bases = [basetype_or_fallback]
return info
def analyze_value_types(self, items: List[Expression]) -> List[Type]:
result = [] # type: List[Type]
for node in items:
try:
analyzed = self.anal_type(expr_to_unanalyzed_type(node),
allow_placeholder=True)
if analyzed is None:
# Type variables are special: we need to place them in the symbol table
# soon, even if some value is not ready yet, see process_typevar_parameters()
# for an example.
analyzed = PlaceholderType(None, [], node.line)
result.append(analyzed)
except TypeTranslationError:
self.fail('Type expected', node)
result.append(AnyType(TypeOfAny.from_error))
return result
def check_classvar(self, s: AssignmentStmt) -> None:
lvalue = s.lvalues[0]
if len(s.lvalues) != 1 or not isinstance(lvalue, RefExpr):
return
if not s.type or not self.is_classvar(s.type):
return
if self.is_class_scope() and isinstance(lvalue, NameExpr):
node = lvalue.node
if isinstance(node, Var):
node.is_classvar = True
elif not isinstance(lvalue, MemberExpr) or self.is_self_member_ref(lvalue):
# In case of member access, report error only when assigning to self
# Other kinds of member assignments should be already reported
self.fail_invalid_classvar(lvalue)
def is_classvar(self, typ: Type) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname() == 'typing.ClassVar'
def is_final_type(self, typ: Optional[Type]) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname() in ('typing.Final',
'typing_extensions.Final')
def fail_invalid_classvar(self, context: Context) -> None:
self.fail('ClassVar can only be used for assignments in class body', context)
def process_module_assignment(self, lvals: List[Lvalue], rval: Expression,
ctx: AssignmentStmt) -> None:
if (isinstance(rval, (TupleExpr, ListExpr))
and all(isinstance(v, TupleExpr) for v in lvals)):
# rval and all lvals are either list or tuple, so we are dealing
# with unpacking assignment like `x, y = a, b`. Mypy can't see through
# the all(isinstance(...)) check above, so cast the lvals to TupleExpr
# to make accessing their .items attribute safe.
seq_lvals = cast(List[TupleExpr], lvals)
elementwise_assignments = zip(rval.items, *[v.items for v in seq_lvals])
for rv, *lvs in elementwise_assignments:
self.process_module_assignment(lvs, rv, ctx)
elif isinstance(rval, RefExpr):
rnode = self.lookup_type_node(rval)
if rnode and isinstance(rnode.node, MypyFile):
for lval in lvals:
if not isinstance(lval, NameExpr):
continue
if (isinstance(lval.node, Var) and lval.node.type is not None):
continue
lnode = self.current_symbol_table().get(lval.name)
if lnode:
if isinstance(lnode.node, MypyFile) and lnode.node is not rnode.node:
self.fail(
"Cannot assign multiple modules to name '{}' "
"without explicit 'types.ModuleType' annotation".format(lval.name),
ctx)
elif lval.is_inferred_def:
lnode.kind = self.current_symbol_kind()
assert rnode.node is not None
lnode.node = rnode.node
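# Illustrative sketch of the module assignments handled above:
#     m = sys             # 'm' becomes a reference to the module
#     m1, m2 = sys, os    # unpacking assignment recurses elementwise
# Re-binding a different module to the same name is rejected unless the
# target has an explicit 'types.ModuleType' annotation.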
def process__all__(self, s: AssignmentStmt) -> None:
if (len(s.lvalues) == 1 and isinstance(s.lvalues[0], NameExpr) and
s.lvalues[0].name == '__all__' and s.lvalues[0].kind == GDEF and
isinstance(s.rvalue, (ListExpr, TupleExpr))):
self.add_exports(s.rvalue.items)
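# e.g. `__all__ = ['f', 'C']` at module top level records 'f' and 'C' as
# explicit exports (a sketch; the tuple form behaves the same way).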
def visit_block(self, b: Block) -> None:
if b.is_unreachable:
return
self.block_depth[-1] += 1
for s in b.body:
self.accept(s)
self.block_depth[-1] -= 1
def visit_block_maybe(self, b: Optional[Block]) -> None:
if b:
self.visit_block(b)
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
self.statement = s
s.expr.accept(self)
def visit_return_stmt(self, s: ReturnStmt) -> None:
self.statement = s
if not self.is_func_scope():
self.fail("'return' outside function", s)
if s.expr:
s.expr.accept(self)
def visit_raise_stmt(self, s: RaiseStmt) -> None:
self.statement = s
if s.expr:
s.expr.accept(self)
if s.from_expr:
s.from_expr.accept(self)
def visit_assert_stmt(self, s: AssertStmt) -> None:
self.statement = s
if s.expr:
s.expr.accept(self)
if s.msg:
s.msg.accept(self)
def visit_operator_assignment_stmt(self,
s: OperatorAssignmentStmt) -> None:
self.statement = s
s.lvalue.accept(self)
s.rvalue.accept(self)
if (isinstance(s.lvalue, NameExpr) and s.lvalue.name == '__all__' and
s.lvalue.kind == GDEF and isinstance(s.rvalue, (ListExpr, TupleExpr))):
self.add_exports(s.rvalue.items)
def visit_while_stmt(self, s: WhileStmt) -> None:
self.statement = s
s.expr.accept(self)
self.loop_depth += 1
s.body.accept(self)
self.loop_depth -= 1
self.visit_block_maybe(s.else_body)
def visit_for_stmt(self, s: ForStmt) -> None:
self.statement = s
s.expr.accept(self)
self.analyze_lvalue(s.index, explicit_type=s.index_type is not None)
if s.index_type:
if self.is_classvar(s.index_type):
self.fail_invalid_classvar(s.index)
allow_tuple_literal = isinstance(s.index, TupleExpr)
analyzed = self.anal_type(s.index_type, allow_tuple_literal=allow_tuple_literal)
if analyzed is not None:
self.store_declared_types(s.index, analyzed)
s.index_type = analyzed
self.loop_depth += 1
self.visit_block(s.body)
self.loop_depth -= 1
self.visit_block_maybe(s.else_body)
def visit_break_stmt(self, s: BreakStmt) -> None:
self.statement = s
if self.loop_depth == 0:
self.fail("'break' outside loop", s, serious=True, blocker=True)
def visit_continue_stmt(self, s: ContinueStmt) -> None:
self.statement = s
if self.loop_depth == 0:
self.fail("'continue' outside loop", s, serious=True, blocker=True)
def visit_if_stmt(self, s: IfStmt) -> None:
self.statement = s
infer_reachability_of_if_statement(s, self.options)
for i in range(len(s.expr)):
s.expr[i].accept(self)
self.visit_block(s.body[i])
self.visit_block_maybe(s.else_body)
def visit_try_stmt(self, s: TryStmt) -> None:
self.statement = s
self.analyze_try_stmt(s, self)
def analyze_try_stmt(self, s: TryStmt, visitor: NodeVisitor[None]) -> None:
s.body.accept(visitor)
for type, var, handler in zip(s.types, s.vars, s.handlers):
if type:
type.accept(visitor)
if var:
self.analyze_lvalue(var)
handler.accept(visitor)
if s.else_body:
s.else_body.accept(visitor)
if s.finally_body:
s.finally_body.accept(visitor)
def visit_with_stmt(self, s: WithStmt) -> None:
self.statement = s
types = []
if s.unanalyzed_type:
assert isinstance(s.unanalyzed_type, ProperType)
actual_targets = [t for t in s.target if t is not None]
if len(actual_targets) == 0:
self.fail('Invalid type comment: "with" statement has no targets', s)
elif len(actual_targets) == 1:
types = [s.unanalyzed_type]
elif isinstance(s.unanalyzed_type, TupleType):
if len(actual_targets) == len(s.unanalyzed_type.items):
types = s.unanalyzed_type.items.copy()
else:
self.fail('Incompatible number of types for "with" targets', s)
else:
# We have multiple targets and one type
self.fail('Multiple types expected for multiple "with" targets', s)
new_types = [] # type: List[Type]
for e, n in zip(s.expr, s.target):
e.accept(self)
if n:
self.analyze_lvalue(n, explicit_type=s.unanalyzed_type is not None)
# Since we have a target, pop the next type from types
if types:
t = types.pop(0)
if self.is_classvar(t):
self.fail_invalid_classvar(n)
allow_tuple_literal = isinstance(n, TupleExpr)
analyzed = self.anal_type(t, allow_tuple_literal=allow_tuple_literal)
if analyzed is not None:
# TODO: Deal with this better
new_types.append(analyzed)
self.store_declared_types(n, analyzed)
s.analyzed_types = new_types
self.visit_block(s.body)
def visit_del_stmt(self, s: DelStmt) -> None:
self.statement = s
s.expr.accept(self)
if not self.is_valid_del_target(s.expr):
self.fail('Invalid delete target', s)
def is_valid_del_target(self, s: Expression) -> bool:
if isinstance(s, (IndexExpr, NameExpr, MemberExpr)):
return True
elif isinstance(s, (TupleExpr, ListExpr)):
return all(self.is_valid_del_target(item) for item in s.items)
else:
return False
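# Illustrative targets (a sketch): `del x`, `del d[k]`, `del obj.attr`, and
# tuple/list combinations of these are valid; anything else, e.g. `del f()`,
# is rejected above.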
def visit_global_decl(self, g: GlobalDecl) -> None:
self.statement = g
for name in g.names:
if name in self.nonlocal_decls[-1]:
self.fail("Name '{}' is nonlocal and global".format(name), g)
self.global_decls[-1].add(name)
def visit_nonlocal_decl(self, d: NonlocalDecl) -> None:
self.statement = d
if not self.is_func_scope():
self.fail("nonlocal declaration not allowed at module level", d)
else:
for name in d.names:
for table in reversed(self.locals[:-1]):
if table is not None and name in table:
break
else:
self.fail("No binding for nonlocal '{}' found".format(name), d)
if self.locals[-1] is not None and name in self.locals[-1]:
self.fail("Name '{}' is already defined in local "
"scope before nonlocal declaration".format(name), d)
if name in self.global_decls[-1]:
self.fail("Name '{}' is nonlocal and global".format(name), d)
self.nonlocal_decls[-1].add(name)
def visit_print_stmt(self, s: PrintStmt) -> None:
self.statement = s
for arg in s.args:
arg.accept(self)
if s.target:
s.target.accept(self)
def visit_exec_stmt(self, s: ExecStmt) -> None:
self.statement = s
s.expr.accept(self)
if s.globals:
s.globals.accept(self)
if s.locals:
s.locals.accept(self)
#
# Expressions
#
def visit_name_expr(self, expr: NameExpr) -> None:
n = self.lookup(expr.name, expr)
if n:
self.bind_name_expr(expr, n)
def bind_name_expr(self, expr: NameExpr, sym: SymbolTableNode) -> None:
if isinstance(sym.node, TypeVarExpr) and self.tvar_scope.get_binding(sym):
self.fail("'{}' is a type variable and only valid in type "
"context".format(expr.name), expr)
elif isinstance(sym.node, PlaceholderNode):
self.process_placeholder(expr.name, 'name', expr)
else:
expr.kind = sym.kind
expr.node = sym.node
expr.fullname = sym.fullname
def visit_super_expr(self, expr: SuperExpr) -> None:
if not self.type and not expr.call.args:
self.fail('"super" used outside class', expr)
return
expr.info = self.type
for arg in expr.call.args:
arg.accept(self)
def visit_tuple_expr(self, expr: TupleExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_list_expr(self, expr: ListExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_set_expr(self, expr: SetExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_dict_expr(self, expr: DictExpr) -> None:
for key, value in expr.items:
if key is not None:
key.accept(self)
value.accept(self)
def visit_star_expr(self, expr: StarExpr) -> None:
if not expr.valid:
# XXX TODO Change this error message
self.fail('Can use starred expression only as assignment target', expr)
else:
expr.expr.accept(self)
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
if not self.is_func_scope(): # not sure
self.fail("'yield from' outside function", e, serious=True, blocker=True)
else:
if self.function_stack[-1].is_coroutine:
self.fail("'yield from' in async function", e, serious=True, blocker=True)
else:
self.function_stack[-1].is_generator = True
if e.expr:
e.expr.accept(self)
def visit_call_expr(self, expr: CallExpr) -> None:
expr.callee.accept(self)
if refers_to_fullname(expr.callee, 'typing.cast'):
# Special form cast(...).
if not self.check_fixed_args(expr, 2, 'cast'):
return
# Translate first argument to an unanalyzed type.
try:
target = expr_to_unanalyzed_type(expr.args[0])
except TypeTranslationError:
self.fail('Cast target is not a type', expr)
return
# Piggyback CastExpr object to the CallExpr object; it takes
# precedence over the CallExpr semantics.
expr.analyzed = CastExpr(expr.args[1], target)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, 'builtins.reveal_type'):
if not self.check_fixed_args(expr, 1, 'reveal_type'):
return
expr.analyzed = RevealExpr(kind=REVEAL_TYPE, expr=expr.args[0])
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, 'builtins.reveal_locals'):
# Store the local variable names into the RevealExpr for use in the
# type checking pass
local_nodes = [] # type: List[Var]
if self.is_module_scope():
# try to determine just the variable declarations in module scope
# self.globals.values() contains SymbolTableNode's
local_nodes = [n.node
for name, n in self.globals.items()
if getattr(n.node, 'is_inferred', False)
and isinstance(n.node, Var)]
elif self.is_class_scope():
    # collect the variable declarations from the enclosing class body
    if self.type is not None:
        local_nodes = [st.node
                       for st in self.type.names.values()
                       if isinstance(st.node, Var)]
elif self.is_func_scope():
    # collect the variable declarations from the current local scope
    if self.locals is not None:
        symbol_table = self.locals[-1]
        if symbol_table is not None:
            local_nodes = [st.node
                           for st in symbol_table.values()
                           if isinstance(st.node, Var)]
expr.analyzed = RevealExpr(kind=REVEAL_LOCALS, local_nodes=local_nodes)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, 'typing.Any'):
self.fail('Any(...) is no longer supported. Use cast(Any, ...) instead', expr)
elif refers_to_fullname(expr.callee, 'typing._promote'):
if not self.check_fixed_args(expr, 1, '_promote'):
return
try:
target = expr_to_unanalyzed_type(expr.args[0])
except TypeTranslationError:
self.fail('Argument 1 to _promote is not a type', expr)
return
expr.analyzed = PromoteExpr(target)
expr.analyzed.line = expr.line
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, 'builtins.dict'):
expr.analyzed = self.translate_dict_call(expr)
elif refers_to_fullname(expr.callee, 'builtins.divmod'):
if not self.check_fixed_args(expr, 2, 'divmod'):
return
expr.analyzed = OpExpr('divmod', expr.args[0], expr.args[1])
expr.analyzed.line = expr.line
expr.analyzed.accept(self)
else:
for a in expr.args:
a.accept(self)
if (isinstance(expr.callee, MemberExpr) and
isinstance(expr.callee.expr, NameExpr) and
expr.callee.expr.name == '__all__' and
expr.callee.expr.kind == GDEF and
expr.callee.name in ('append', 'extend')):
if expr.callee.name == 'append' and expr.args:
self.add_exports(expr.args[0])
elif (expr.callee.name == 'extend' and expr.args and
isinstance(expr.args[0], (ListExpr, TupleExpr))):
self.add_exports(expr.args[0].items)
def translate_dict_call(self, call: CallExpr) -> Optional[DictExpr]:
if not call.args:
return None
if not all(kind == ARG_NAMED for kind in call.arg_kinds):
for a in call.args:
a.accept(self)
return None
expr = DictExpr([(StrExpr(cast(str, key)), value)
for key, value in zip(call.arg_names, call.args)])
expr.set_line(call)
expr.accept(self)
return expr
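# Sketch of the translation above: `dict(a=1, b='x')` becomes the dictionary
# expression {'a': 1, 'b': 'x'}; calls with positional arguments (or no
# arguments) are not translated and are analyzed as ordinary calls.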
def check_fixed_args(self, expr: CallExpr, numargs: int,
name: str) -> bool:
s = 's'
if numargs == 1:
s = ''
if len(expr.args) != numargs:
self.fail("'%s' expects %d argument%s" % (name, numargs, s),
expr)
return False
if expr.arg_kinds != [ARG_POS] * numargs:
self.fail("'%s' must be called with %s positional argument%s" %
(name, numargs, s), expr)
return False
return True
def visit_member_expr(self, expr: MemberExpr) -> None:
base = expr.expr
base.accept(self)
if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
sym = self.get_module_symbol(base.node, expr.name)
if sym:
if isinstance(sym.node, PlaceholderNode):
self.process_placeholder(expr.name, 'attribute', expr)
return
expr.kind = sym.kind
expr.fullname = sym.fullname
expr.node = sym.node
elif isinstance(base, RefExpr):
# This branch handles C.bar (and cls.bar / self.bar inside a method),
# where C is a class and bar is a type definition or a module in C's
# namespace. This is done only when bar is a module or a type;
# other things (e.g. methods) are handled by other code in
# checkmember.
type_info = None
if isinstance(base.node, TypeInfo):
# C.bar where C is a class
type_info = base.node
elif isinstance(base.node, Var) and self.type and self.function_stack:
# check for self.bar or cls.bar in method/classmethod
func_def = self.function_stack[-1]
if not func_def.is_static and isinstance(func_def.type, CallableType):
formal_arg = func_def.type.argument_by_name(base.node.name())
if formal_arg and formal_arg.pos == 0:
type_info = self.type
elif isinstance(base.node, TypeAlias) and base.node.no_args:
assert isinstance(base.node.target, ProperType)
# TODO: support chained aliases.
if isinstance(base.node.target, Instance):
type_info = base.node.target.type
if type_info:
n = type_info.names.get(expr.name)
if n is not None and isinstance(n.node, (MypyFile, TypeInfo, TypeAlias)):
if not n:
return
expr.kind = n.kind
expr.fullname = n.fullname
expr.node = n.node
def visit_op_expr(self, expr: OpExpr) -> None:
expr.left.accept(self)
if expr.op in ('and', 'or'):
inferred = infer_condition_value(expr.left, self.options)
if ((inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'and') or
(inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'or')):
expr.right_unreachable = True
return
elif ((inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'and') or
(inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'or')):
expr.right_always = True
expr.right.accept(self)
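# A sketch of the short-circuit handling above: in an expression like
# `sys.platform == 'win32' and do_windows_stuff()` analyzed for another
# platform, the left operand is inferred always false, so the right operand
# is marked unreachable; `or` mirrors this for always-true left operands.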
def visit_comparison_expr(self, expr: ComparisonExpr) -> None:
for operand in expr.operands:
operand.accept(self)
def visit_unary_expr(self, expr: UnaryExpr) -> None:
expr.expr.accept(self)
def visit_index_expr(self, expr: IndexExpr) -> None:
base = expr.base
base.accept(self)
if (isinstance(base, RefExpr)
and isinstance(base.node, TypeInfo)
and not base.node.is_generic()):
expr.index.accept(self)
elif ((isinstance(base, RefExpr) and isinstance(base.node, TypeAlias))
or refers_to_class_or_function(base)):
# We need to do full processing on every iteration, since some type
# arguments may contain placeholder types.
self.analyze_type_application(expr)
else:
expr.index.accept(self)
def analyze_type_application(self, expr: IndexExpr) -> None:
types = self.analyze_type_application_args(expr)
if types is None:
return
base = expr.base
expr.analyzed = TypeApplication(base, types)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
# Types list, dict, set are not subscriptable, prohibit this if
# subscripted either via type alias...
if isinstance(base, RefExpr) and isinstance(base.node, TypeAlias):
alias = base.node
target = get_proper_type(alias.target)
if isinstance(target, Instance):
name = target.type.fullname()
if (alias.no_args and # this avoids bogus errors for already reported aliases
name in nongen_builtins and not alias.normalized):
self.fail(no_subscript_builtin_alias(name, propose_alt=False), expr)
# ...or directly.
else:
n = self.lookup_type_node(base)
if n and n.fullname in nongen_builtins:
self.fail(no_subscript_builtin_alias(n.fullname, propose_alt=False), expr)
def analyze_type_application_args(self, expr: IndexExpr) -> Optional[List[Type]]:
index = expr.index
tag = self.track_incomplete_refs()
self.analyze_type_expr(index)
if self.found_incomplete_ref(tag):
return None
types = [] # type: List[Type]
if isinstance(index, TupleExpr):
items = index.items
else:
items = [index]
for item in items:
try:
typearg = expr_to_unanalyzed_type(item)
except TypeTranslationError:
self.fail('Type expected within [...]', expr)
return None
# We always allow unbound type variables in IndexExpr, since we
# may be analysing a type alias definition rvalue. The error will be
# reported elsewhere if it is not the case.
analyzed = self.anal_type(typearg, allow_unbound_tvars=True,
allow_placeholder=True)
if analyzed is None:
return None
types.append(analyzed)
return types
def visit_slice_expr(self, expr: SliceExpr) -> None:
if expr.begin_index:
expr.begin_index.accept(self)
if expr.end_index:
expr.end_index.accept(self)
if expr.stride:
expr.stride.accept(self)
def visit_cast_expr(self, expr: CastExpr) -> None:
expr.expr.accept(self)
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
def visit_reveal_expr(self, expr: RevealExpr) -> None:
if expr.kind == REVEAL_TYPE:
if expr.expr is not None:
expr.expr.accept(self)
else:
# Reveal locals doesn't have an inner expression, there's no
# need to traverse inside it
pass
def visit_type_application(self, expr: TypeApplication) -> None:
expr.expr.accept(self)
for i in range(len(expr.types)):
analyzed = self.anal_type(expr.types[i])
if analyzed is not None:
expr.types[i] = analyzed
def visit_list_comprehension(self, expr: ListComprehension) -> None:
expr.generator.accept(self)
def visit_set_comprehension(self, expr: SetComprehension) -> None:
expr.generator.accept(self)
def visit_dictionary_comprehension(self, expr: DictionaryComprehension) -> None:
self.enter(expr)
self.analyze_comp_for(expr)
expr.key.accept(self)
expr.value.accept(self)
self.leave()
self.analyze_comp_for_2(expr)
def visit_generator_expr(self, expr: GeneratorExpr) -> None:
self.enter(expr)
self.analyze_comp_for(expr)
expr.left_expr.accept(self)
self.leave()
self.analyze_comp_for_2(expr)
def analyze_comp_for(self, expr: Union[GeneratorExpr,
DictionaryComprehension]) -> None:
for i, (index, sequence, conditions) in enumerate(zip(expr.indices,
expr.sequences,
expr.condlists)):
if i > 0:
sequence.accept(self)
# Bind index variables.
self.analyze_lvalue(index)
for cond in conditions:
cond.accept(self)
def analyze_comp_for_2(self, expr: Union[GeneratorExpr,
DictionaryComprehension]) -> None:
expr.sequences[0].accept(self)
def visit_lambda_expr(self, expr: LambdaExpr) -> None:
self.analyze_arg_initializers(expr)
self.analyze_function_body(expr)
def visit_conditional_expr(self, expr: ConditionalExpr) -> None:
expr.if_expr.accept(self)
expr.cond.accept(self)
expr.else_expr.accept(self)
def visit_backquote_expr(self, expr: BackquoteExpr) -> None:
expr.expr.accept(self)
def visit__promote_expr(self, expr: PromoteExpr) -> None:
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
def visit_yield_expr(self, expr: YieldExpr) -> None:
if not self.is_func_scope():
self.fail("'yield' outside function", expr, serious=True, blocker=True)
else:
if self.function_stack[-1].is_coroutine:
if self.options.python_version < (3, 6):
self.fail("'yield' in async function", expr, serious=True, blocker=True)
else:
self.function_stack[-1].is_generator = True
self.function_stack[-1].is_async_generator = True
else:
self.function_stack[-1].is_generator = True
if expr.expr:
expr.expr.accept(self)
def visit_await_expr(self, expr: AwaitExpr) -> None:
if not self.is_func_scope():
self.fail("'await' outside function", expr)
elif not self.function_stack[-1].is_coroutine:
self.fail("'await' outside coroutine ('async def')", expr)
expr.expr.accept(self)
#
# Lookup functions
#
def lookup(self, name: str, ctx: Context,
suppress_errors: bool = False) -> Optional[SymbolTableNode]:
implicit_name = False
# 1a. Name declared using 'global x' takes precedence
if name in self.global_decls[-1]:
if name in self.globals:
return self.globals[name]
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
# 1b. Name declared using 'nonlocal x' takes precedence
if name in self.nonlocal_decls[-1]:
for table in reversed(self.locals[:-1]):
if table is not None and name in table:
return table[name]
else:
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
# 2. Class attributes (if within class definition)
if self.type and not self.is_func_scope() and name in self.type.names:
node = self.type.names[name]
if not node.implicit:
if self.is_active_symbol_in_class_body(node.node):
return node
else:
# Defined through self.x assignment
implicit_name = True
implicit_node = node
# 3. Local (function) scopes
for table in reversed(self.locals):
if table is not None and name in table:
return table[name]
# 4. Current file global scope
if name in self.globals:
return self.globals[name]
# 5. Builtins
b = self.globals.get('__builtins__', None)
if b:
assert isinstance(b.node, MypyFile)
table = b.node.names
if name in table:
if name[0] == "_" and name[1] != "_":
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
node = table[name]
return node
# Give up.
if not implicit_name and not suppress_errors:
self.name_not_defined(name, ctx)
else:
if implicit_name:
return implicit_node
return None
def is_active_symbol_in_class_body(self, node: Optional[SymbolNode]) -> bool:
# TODO: Forward reference to name imported in class body is not
# caught.
assert self.statement # we are at class scope
return (node is None
or node.line < self.statement.line
or not self.is_defined_in_current_module(node.fullname())
or isinstance(node, TypeInfo)
or (isinstance(node, PlaceholderNode) and node.becomes_typeinfo))
def is_defined_in_current_module(self, fullname: Optional[str]) -> bool:
if fullname is None:
return False
return module_prefix(self.modules, fullname) == self.cur_mod_id
def lookup_qualified(self, name: str, ctx: Context,
suppress_errors: bool = False) -> Optional[SymbolTableNode]:
if '.' not in name:
# Simple case: look up a short name.
return self.lookup(name, ctx, suppress_errors=suppress_errors)
parts = name.split('.')
namespace = self.cur_mod_id
sym = self.lookup(parts[0], ctx, suppress_errors=suppress_errors)
if sym:
for i in range(1, len(parts)):
node = sym.node
part = parts[i]
if isinstance(node, TypeInfo):
nextsym = node.get(part)
elif isinstance(node, MypyFile):
nextsym = self.get_module_symbol(node, part)
namespace = node.fullname()
elif isinstance(node, PlaceholderNode):
return sym
else:
if isinstance(node, Var):
typ = get_proper_type(node.type)
if isinstance(typ, AnyType):
# Allow access through Var with Any type without error.
return self.implicit_symbol(sym, name, parts[i:], typ)
# Lookup through invalid node, such as variable or function
nextsym = None
if not nextsym or nextsym.module_hidden:
if not suppress_errors:
self.name_not_defined(name, ctx, namespace=namespace)
return None
sym = nextsym
return sym
def lookup_type_node(self, expr: Expression) -> Optional[SymbolTableNode]:
try:
t = expr_to_unanalyzed_type(expr)
except TypeTranslationError:
return None
if isinstance(t, UnboundType):
n = self.lookup_qualified(t.name, expr, suppress_errors=True)
return n
return None
def get_module_symbol(self, node: MypyFile, name: str) -> Optional[SymbolTableNode]:
module = node.fullname()
names = node.names
sym = names.get(name)
if not sym:
fullname = module + '.' + name
if fullname in self.modules:
sym = SymbolTableNode(GDEF, self.modules[fullname])
elif self.is_incomplete_namespace(module):
self.record_incomplete_ref()
elif ('__getattr__' in names
and (node.is_stub
or self.options.python_version >= (3, 7))):
gvar = self.create_getattr_var(names['__getattr__'], name, fullname)
if gvar:
sym = SymbolTableNode(GDEF, gvar)
elif self.is_missing_module(fullname):
# We use the fullname of the original definition so that we can
# detect whether two names refer to the same thing.
var_type = AnyType(TypeOfAny.from_unimported_type)
v = Var(name, type=var_type)
v._fullname = fullname
sym = SymbolTableNode(GDEF, v)
elif sym.module_hidden:
sym = None
return sym
def is_missing_module(self, module: str) -> bool:
return module in self.missing_modules
def implicit_symbol(self, sym: SymbolTableNode, name: str, parts: List[str],
source_type: AnyType) -> SymbolTableNode:
if sym.node is None:
basename = None
else:
basename = sym.node.fullname()
if basename is None:
fullname = name
else:
fullname = basename + '.' + '.'.join(parts)
var_type = AnyType(TypeOfAny.from_another_any, source_type)
var = Var(parts[-1], var_type)
var._fullname = fullname
return SymbolTableNode(GDEF, var)
def create_getattr_var(self, getattr_defn: SymbolTableNode,
name: str, fullname: str) -> Optional[Var]:
if isinstance(getattr_defn.node, (FuncDef, Var)):
node_type = get_proper_type(getattr_defn.node.type)
if isinstance(node_type, CallableType):
typ = node_type.ret_type
else:
typ = AnyType(TypeOfAny.from_error)
v = Var(name, type=typ)
v._fullname = fullname
v.from_module_getattr = True
return v
return None
def lookup_fully_qualified(self, name: str) -> SymbolTableNode:
parts = name.split('.')
n = self.modules[parts[0]]
for i in range(1, len(parts) - 1):
next_sym = n.names[parts[i]]
assert isinstance(next_sym.node, MypyFile)
n = next_sym.node
return n.names[parts[-1]]
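    # Illustrative trace (hypothetical names): for 'pkg.mod.f' this walks
    #     self.modules['pkg'] -> names['mod'].node (a MypyFile) -> names['f']
    # asserting that every intermediate part resolves to a module.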
def lookup_fully_qualified_or_none(self, fullname: str) -> Optional[SymbolTableNode]:
# TODO: unify/clean-up/simplify lookup methods, see #4157.
# TODO: support nested classes (but consider performance impact,
# we might keep the module level only lookup for thing like 'builtins.int').
assert '.' in fullname
module, name = fullname.rsplit('.', maxsplit=1)
if module not in self.modules:
return None
filenode = self.modules[module]
result = filenode.names.get(name)
if result is None and self.is_incomplete_namespace(module):
# TODO: More explicit handling of incomplete refs?
self.record_incomplete_ref()
return result
def builtin_type(self, fully_qualified_name: str) -> Instance:
sym = self.lookup_fully_qualified(fully_qualified_name)
node = sym.node
assert isinstance(node, TypeInfo)
return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
def object_type(self) -> Instance:
return self.named_type('__builtins__.object')
def str_type(self) -> Instance:
return self.named_type('__builtins__.str')
def named_type(self, qualified_name: str, args: Optional[List[Type]] = None) -> Instance:
sym = self.lookup_qualified(qualified_name, Context())
assert sym, "Internal error: attempted to construct unknown type"
node = sym.node
assert isinstance(node, TypeInfo)
if args:
# TODO: assert len(args) == len(node.defn.type_vars)
return Instance(node, args)
return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
def named_type_or_none(self, qualified_name: str,
args: Optional[List[Type]] = None) -> Optional[Instance]:
sym = self.lookup_fully_qualified_or_none(qualified_name)
if not sym or isinstance(sym.node, PlaceholderNode):
return None
node = sym.node
if isinstance(node, TypeAlias):
assert isinstance(node.target, Instance) # type: ignore
node = node.target.type
assert isinstance(node, TypeInfo), node
if args is not None:
# TODO: assert len(args) == len(node.defn.type_vars)
return Instance(node, args)
return Instance(node, [AnyType(TypeOfAny.unannotated)] * len(node.defn.type_vars))
def lookup_current_scope(self, name: str) -> Optional[SymbolTableNode]:
if self.locals[-1] is not None:
return self.locals[-1].get(name)
elif self.type is not None:
return self.type.names.get(name)
else:
return self.globals.get(name)
#
# Adding symbols
#
def add_symbol(self,
name: str,
node: SymbolNode,
context: Context,
module_public: bool = True,
module_hidden: bool = False,
can_defer: bool = True,
escape_comprehensions: bool = False) -> bool:
if self.is_func_scope():
kind = LDEF
elif self.type is not None:
kind = MDEF
else:
kind = GDEF
symbol = SymbolTableNode(kind,
node,
module_public=module_public,
module_hidden=module_hidden)
return self.add_symbol_table_node(name, symbol, context, can_defer, escape_comprehensions)
def add_symbol_skip_local(self, name: str, node: SymbolNode) -> None:
# TODO: currently this is only used by named tuples. Use this method
# also by typed dicts and normal classes, see issue #6422.
if self.type is not None:
names = self.type.names
kind = MDEF
else:
names = self.globals
kind = GDEF
symbol = SymbolTableNode(kind, node)
names[name] = symbol
def add_symbol_table_node(self,
name: str,
symbol: SymbolTableNode,
context: Optional[Context] = None,
can_defer: bool = True,
escape_comprehensions: bool = False) -> bool:
names = self.current_symbol_table(escape_comprehensions=escape_comprehensions)
existing = names.get(name)
if isinstance(symbol.node, PlaceholderNode) and can_defer:
self.defer(context)
if (existing is not None
and context is not None
and not is_valid_replacement(existing, symbol)):
            # There is an existing node, so this may be a redefinition.
            # If the new node points to the same node as the old one,
            # or if both old and new nodes are placeholders, we don't
            # need to do anything.
old = existing.node
new = symbol.node
if isinstance(new, PlaceholderNode):
return False
if not is_same_symbol(old, new):
if isinstance(new, (FuncDef, Decorator, OverloadedFuncDef, TypeInfo)):
self.add_redefinition(names, name, symbol)
if not (isinstance(new, (FuncDef, Decorator))
and self.set_original_def(old, new)):
self.name_already_defined(name, context, existing)
elif name not in self.missing_names and '*' not in self.missing_names:
names[name] = symbol
self.progress = True
return True
return False
def add_redefinition(self,
names: SymbolTable,
name: str,
symbol: SymbolTableNode) -> None:
i = 1
        # Don't serialize redefined nodes. They are likely to have
        # busted internal references which can cause problems with
        # serialization, and they can't have any external references to
        # them.
symbol.no_serialize = True
while True:
if i == 1:
new_name = '{}-redefinition'.format(name)
else:
new_name = '{}-redefinition{}'.format(name, i)
existing = names.get(new_name)
if existing is None:
names[new_name] = symbol
return
elif existing.node is symbol.node:
return
i += 1
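    # Example of the generated names (illustrative): the first redefinition of
    # 'f' is stored as 'f-redefinition', later ones as 'f-redefinition2',
    # 'f-redefinition3', and so on, stopping early if the symbol is already
    # stored under one of those names.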
def add_module_symbol(self,
id: str,
as_id: str,
module_public: bool,
context: Context,
module_hidden: bool = False) -> None:
if id in self.modules:
node = self.modules[id]
self.add_symbol(as_id, node, context,
module_public=module_public,
module_hidden=module_hidden)
else:
self.add_unknown_imported_symbol(as_id, context, target_name=id)
def add_local(self, node: Union[Var, FuncDef, OverloadedFuncDef], context: Context) -> None:
assert self.is_func_scope()
name = node.name()
node._fullname = name
self.add_symbol(name, node, context)
def add_imported_symbol(self,
name: str,
node: SymbolTableNode,
context: Context,
module_public: bool = True,
module_hidden: bool = False) -> None:
symbol = SymbolTableNode(node.kind, node.node,
module_public=module_public,
module_hidden=module_hidden)
self.add_symbol_table_node(name, symbol, context)
def add_unknown_imported_symbol(self,
name: str,
context: Context,
target_name: Optional[str] = None) -> None:
existing = self.current_symbol_table().get(name)
if existing and isinstance(existing.node, Var) and existing.node.is_suppressed_import:
return
var = Var(name)
if self.options.logical_deps and target_name is not None:
            # This makes it possible to add logical fine-grained dependencies
            # from a missing module. We can't use this by default, since in a
            # few places we assume that the full name points to a real
            # definition, but this name may point to nothing.
var._fullname = target_name
elif self.type:
var._fullname = self.type.fullname() + "." + name
var.info = self.type
else:
var._fullname = self.qualified_name(name)
var.is_ready = True
any_type = AnyType(TypeOfAny.from_unimported_type, missing_import_name=var._fullname)
var.type = any_type
var.is_suppressed_import = True
self.add_symbol(name, var, context)
#
# Other helpers
#
@contextmanager
def tvar_scope_frame(self, frame: TypeVarScope) -> Iterator[None]:
old_scope = self.tvar_scope
self.tvar_scope = frame
yield
self.tvar_scope = old_scope
def defer(self, debug_context: Optional[Context] = None) -> None:
assert not self.final_iteration, 'Must not defer during final iteration'
self.deferred = True
# Store debug info for this deferral.
line = (debug_context.line if debug_context else
self.statement.line if self.statement else -1)
self.deferral_debug_context.append((self.cur_mod_id, line))
def track_incomplete_refs(self) -> Tag:
return self.num_incomplete_refs
def found_incomplete_ref(self, tag: Tag) -> bool:
return self.num_incomplete_refs != tag
def record_incomplete_ref(self) -> None:
self.defer()
self.num_incomplete_refs += 1
def mark_incomplete(self, name: str, node: Node,
becomes_typeinfo: bool = False) -> None:
self.defer(node)
if name == '*':
self.incomplete = True
elif not self.is_global_or_nonlocal(name):
fullname = self.qualified_name(name)
assert self.statement
placeholder = PlaceholderNode(fullname, node, self.statement.line,
becomes_typeinfo=becomes_typeinfo)
self.add_symbol(name, placeholder, context=dummy_context())
self.missing_names.add(name)
def is_incomplete_namespace(self, fullname: str) -> bool:
return fullname in self.incomplete_namespaces
def process_placeholder(self, name: str, kind: str, ctx: Context) -> None:
if self.final_iteration:
self.cannot_resolve_name(name, kind, ctx)
else:
self.defer(ctx)
def cannot_resolve_name(self, name: str, kind: str, ctx: Context) -> None:
self.fail('Cannot resolve {} "{}" (possible cyclic definition)'.format(kind, name), ctx)
def qualified_name(self, name: str) -> str:
if self.type is not None:
return self.type._fullname + '.' + name
elif self.is_func_scope():
return name
else:
return self.cur_mod_id + '.' + name
def enter(self, function: Union[FuncItem, GeneratorExpr, DictionaryComprehension]) -> None:
names = self.saved_locals.setdefault(function, SymbolTable())
self.locals.append(names)
is_comprehension = isinstance(function, (GeneratorExpr, DictionaryComprehension))
self.is_comprehension_stack.append(is_comprehension)
self.global_decls.append(set())
self.nonlocal_decls.append(set())
# -1 since entering block will increment this to 0.
self.block_depth.append(-1)
def leave(self) -> None:
self.locals.pop()
self.is_comprehension_stack.pop()
self.global_decls.pop()
self.nonlocal_decls.pop()
self.block_depth.pop()
def is_func_scope(self) -> bool:
return self.locals[-1] is not None
def is_nested_within_func_scope(self) -> bool:
return any(l is not None for l in self.locals)
def is_class_scope(self) -> bool:
return self.type is not None and not self.is_func_scope()
def is_module_scope(self) -> bool:
return not (self.is_class_scope() or self.is_func_scope())
def current_symbol_kind(self) -> int:
if self.is_class_scope():
kind = MDEF
elif self.is_func_scope():
kind = LDEF
else:
kind = GDEF
return kind
def current_symbol_table(self, escape_comprehensions: bool = False) -> SymbolTable:
if self.is_func_scope():
assert self.locals[-1] is not None
if escape_comprehensions:
for i, is_comprehension in enumerate(reversed(self.is_comprehension_stack)):
if not is_comprehension:
names = self.locals[-1 - i]
break
else:
assert False, "Should have at least one non-comprehension scope"
else:
names = self.locals[-1]
assert names is not None
elif self.type is not None:
names = self.type.names
else:
names = self.globals
return names
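    # Example (illustrative): for a name that must escape a comprehension scope
    # (such as an assignment expression inside a list comprehension in a
    # function), escape_comprehensions=True walks past the comprehension frames
    # and returns the symbol table of the enclosing non-comprehension scope.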
def is_global_or_nonlocal(self, name: str) -> bool:
return (self.is_func_scope()
and (name in self.global_decls[-1]
or name in self.nonlocal_decls[-1]))
def add_exports(self, exp_or_exps: Union[Iterable[Expression], Expression]) -> None:
exps = [exp_or_exps] if isinstance(exp_or_exps, Expression) else exp_or_exps
for exp in exps:
if isinstance(exp, StrExpr):
self.all_exports.append(exp.value)
def check_no_global(self,
name: str,
ctx: Context,
is_overloaded_func: bool = False) -> None:
if name in self.globals:
prev_is_overloaded = isinstance(self.globals[name], OverloadedFuncDef)
if is_overloaded_func and prev_is_overloaded:
self.fail("Nonconsecutive overload {} found".format(name), ctx)
elif prev_is_overloaded:
self.fail("Definition of '{}' missing 'overload'".format(name), ctx)
else:
self.name_already_defined(name, ctx, self.globals[name])
def name_not_defined(self, name: str, ctx: Context, namespace: Optional[str] = None) -> None:
if self.is_incomplete_namespace(namespace or self.cur_mod_id):
            # Target namespace is incomplete, so it's possible that the name will be defined
            # when the namespace is complete.
self.record_incomplete_ref()
return
message = "Name '{}' is not defined".format(name)
self.fail(message, ctx, code=codes.NAME_DEFINED)
        if 'builtins.{}'.format(name) in SUGGESTED_TEST_FIXTURES:
            # The user probably has a missing definition in a test fixture. Let's verify.
            fullname = 'builtins.{}'.format(name)
if self.lookup_fully_qualified_or_none(fullname) is None:
# Yes. Generate a helpful note.
self.add_fixture_note(fullname, ctx)
modules_with_unimported_hints = {
name.split('.', 1)[0]
for name in TYPES_FOR_UNIMPORTED_HINTS
}
lowercased = {
name.lower(): name
for name in TYPES_FOR_UNIMPORTED_HINTS
}
for module in modules_with_unimported_hints:
fullname = '{}.{}'.format(module, name).lower()
if fullname not in lowercased:
continue
# User probably forgot to import these types.
hint = (
'Did you forget to import it from "{module}"?'
' (Suggestion: "from {module} import {name}")'
).format(module=module, name=lowercased[fullname].rsplit('.', 1)[-1])
self.note(hint, ctx, code=codes.NAME_DEFINED)
def already_defined(self,
name: str,
ctx: Context,
original_ctx: Optional[Union[SymbolTableNode, SymbolNode]],
noun: str) -> None:
if isinstance(original_ctx, SymbolTableNode):
node = original_ctx.node # type: Optional[SymbolNode]
elif isinstance(original_ctx, SymbolNode):
node = original_ctx
else:
node = None
if isinstance(original_ctx, SymbolTableNode) and isinstance(original_ctx.node, MypyFile):
# Since this is an import, original_ctx.node points to the module definition.
# Therefore its line number is always 1, which is not useful for this
# error message.
extra_msg = ' (by an import)'
elif node and node.line != -1 and self.is_local_name(node.fullname()):
# TODO: Using previous symbol node may give wrong line. We should use
# the line number where the binding was established instead.
extra_msg = ' on line {}'.format(node.line)
else:
extra_msg = ' (possibly by an import)'
self.fail("{} '{}' already defined{}".format(noun, unmangle(name), extra_msg), ctx,
code=codes.NO_REDEF)
def name_already_defined(self,
name: str,
ctx: Context,
original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
) -> None:
self.already_defined(name, ctx, original_ctx, noun='Name')
def attribute_already_defined(self,
name: str,
ctx: Context,
original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
) -> None:
self.already_defined(name, ctx, original_ctx, noun='Attribute')
def is_local_name(self, name: str) -> bool:
return self.is_defined_in_current_module(name) or '.' not in name
def fail(self,
msg: str,
ctx: Context,
serious: bool = False,
*,
code: Optional[ErrorCode] = None,
blocker: bool = False) -> None:
if (not serious and
not self.options.check_untyped_defs and
self.function_stack and
self.function_stack[-1].is_dynamic()):
return
# In case it's a bug and we don't really have context
assert ctx is not None, msg
self.errors.report(ctx.get_line(), ctx.get_column(), msg, blocker=blocker, code=code)
def fail_blocker(self, msg: str, ctx: Context) -> None:
self.fail(msg, ctx, blocker=True)
def note(self, msg: str, ctx: Context, code: Optional[ErrorCode] = None) -> None:
if (not self.options.check_untyped_defs and
self.function_stack and
self.function_stack[-1].is_dynamic()):
return
self.errors.report(ctx.get_line(), ctx.get_column(), msg, severity='note', code=code)
def accept(self, node: Node) -> None:
try:
node.accept(self)
except Exception as err:
report_internal_error(err, self.errors.file, node.line, self.errors, self.options)
def expr_to_analyzed_type(self,
expr: Expression,
report_invalid_types: bool = True,
allow_placeholder: bool = False) -> Optional[Type]:
if isinstance(expr, CallExpr):
expr.accept(self)
is_named_tuple, info = self.named_tuple_analyzer.check_namedtuple(expr, None,
self.is_func_scope())
if not is_named_tuple:
# Some form of namedtuple is the only valid type that looks like a call
# expression. This isn't a valid type.
raise TypeTranslationError()
elif not info:
self.defer(expr)
return None
assert info.tuple_type, "NamedTuple without tuple type"
fallback = Instance(info, [])
return TupleType(info.tuple_type.items, fallback=fallback)
typ = expr_to_unanalyzed_type(expr)
return self.anal_type(typ, report_invalid_types=report_invalid_types,
allow_placeholder=allow_placeholder)
def analyze_type_expr(self, expr: Expression) -> None:
with self.tvar_scope_frame(TypeVarScope()):
expr.accept(self)
def type_analyzer(self, *,
tvar_scope: Optional[TypeVarScope] = None,
allow_tuple_literal: bool = False,
allow_unbound_tvars: bool = False,
allow_placeholder: bool = False,
report_invalid_types: bool = True) -> TypeAnalyser:
if tvar_scope is None:
tvar_scope = self.tvar_scope
tpan = TypeAnalyser(self,
tvar_scope,
self.plugin,
self.options,
self.is_typeshed_stub_file,
allow_unbound_tvars=allow_unbound_tvars,
allow_tuple_literal=allow_tuple_literal,
report_invalid_types=report_invalid_types,
allow_unnormalized=self.is_stub_file,
allow_placeholder=allow_placeholder)
tpan.in_dynamic_func = bool(self.function_stack and self.function_stack[-1].is_dynamic())
tpan.global_scope = not self.type and not self.function_stack
return tpan
def anal_type(self,
typ: Type, *,
tvar_scope: Optional[TypeVarScope] = None,
allow_tuple_literal: bool = False,
allow_unbound_tvars: bool = False,
allow_placeholder: bool = False,
report_invalid_types: bool = True,
third_pass: bool = False) -> Optional[Type]:
a = self.type_analyzer(tvar_scope=tvar_scope,
allow_unbound_tvars=allow_unbound_tvars,
allow_tuple_literal=allow_tuple_literal,
allow_placeholder=allow_placeholder,
report_invalid_types=report_invalid_types)
tag = self.track_incomplete_refs()
typ = typ.accept(a)
if self.found_incomplete_ref(tag):
return None
self.add_type_alias_deps(a.aliases_used)
return typ
def class_type(self, self_type: Type) -> Type:
return TypeType.make_normalized(self_type)
def schedule_patch(self, priority: int, patch: Callable[[], None]) -> None:
self.patches.append((priority, patch))
def report_hang(self) -> None:
print('Deferral trace:')
for mod, line in self.deferral_debug_context:
print(' {}:{}'.format(mod, line))
self.errors.report(-1, -1,
'INTERNAL ERROR: maximum semantic analysis iteration count reached',
blocker=True)
def add_plugin_dependency(self, trigger: str, target: Optional[str] = None) -> None:
if target is None:
target = self.scope.current_target()
self.cur_mod_node.plugin_deps.setdefault(trigger, set()).add(target)
def add_type_alias_deps(self,
aliases_used: Iterable[str],
target: Optional[str] = None) -> None:
if not aliases_used:
return
if target is None:
target = self.scope.current_target()
self.cur_mod_node.alias_deps[target].update(aliases_used)
def is_mangled_global(self, name: str) -> bool:
return unmangle(name) + "'" in self.globals
def is_initial_mangled_global(self, name: str) -> bool:
# If there are renamed definitions for a global, the first one has exactly one prime.
return name == unmangle(name) + "'"
def parse_bool(self, expr: Expression) -> Optional[bool]:
if isinstance(expr, NameExpr):
if expr.fullname == 'builtins.True':
return True
if expr.fullname == 'builtins.False':
return False
return None
class HasPlaceholders(TypeQuery[bool]):
def __init__(self) -> None:
super().__init__(any)
def visit_placeholder_type(self, t: PlaceholderType) -> bool:
return True
def has_placeholder(typ: Type) -> bool:
return typ.accept(HasPlaceholders())
def replace_implicit_first_type(sig: FunctionLike, new: Type) -> FunctionLike:
if isinstance(sig, CallableType):
if len(sig.arg_types) == 0:
return sig
return sig.copy_modified(arg_types=[new] + sig.arg_types[1:])
elif isinstance(sig, Overloaded):
return Overloaded([cast(CallableType, replace_implicit_first_type(i, new))
for i in sig.items()])
else:
assert False
def refers_to_fullname(node: Expression, fullname: str) -> bool:
if not isinstance(node, RefExpr):
return False
if node.fullname == fullname:
return True
if isinstance(node.node, TypeAlias):
target = get_proper_type(node.node.target)
if isinstance(target, Instance) and target.type.fullname() == fullname:
return True
return False
def refers_to_class_or_function(node: Expression) -> bool:
return (isinstance(node, RefExpr) and
isinstance(node.node, (TypeInfo, FuncDef, OverloadedFuncDef)))
def find_duplicate(list: List[T]) -> Optional[T]:
for i in range(1, len(list)):
if list[i] in list[:i]:
return list[i]
return None
def remove_imported_names_from_symtable(names: SymbolTable,
module: str) -> None:
removed = []
for name, node in names.items():
if node.node is None:
continue
fullname = node.node.fullname()
prefix = fullname[:fullname.rfind('.')]
if prefix != module:
removed.append(name)
for name in removed:
del names[name]
def make_any_non_explicit(t: Type) -> Type:
return t.accept(MakeAnyNonExplicit())
class MakeAnyNonExplicit(TypeTranslator):
def visit_any(self, t: AnyType) -> Type:
if t.type_of_any == TypeOfAny.explicit:
return t.copy_modified(TypeOfAny.special_form)
return t
def apply_semantic_analyzer_patches(patches: List[Tuple[int, Callable[[], None]]]) -> None:
patches_by_priority = sorted(patches, key=lambda x: x[0])
for priority, patch_func in patches_by_priority:
patch_func()
def names_modified_by_assignment(s: AssignmentStmt) -> List[NameExpr]:
result = []
for lvalue in s.lvalues:
result += names_modified_in_lvalue(lvalue)
return result
def names_modified_in_lvalue(lvalue: Lvalue) -> List[NameExpr]:
if isinstance(lvalue, NameExpr):
return [lvalue]
elif isinstance(lvalue, StarExpr):
return names_modified_in_lvalue(lvalue.expr)
elif isinstance(lvalue, (ListExpr, TupleExpr)):
result = []
for item in lvalue.items:
result += names_modified_in_lvalue(item)
return result
return []
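# Example (illustrative): for the statement "a, (b, *c) = rhs" the lvalue is a
# TupleExpr and names_modified_in_lvalue returns the NameExpr nodes for a, b
# and c; attribute or index targets such as "obj.a" contribute no names.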
def is_same_var_from_getattr(n1: Optional[SymbolNode], n2: Optional[SymbolNode]) -> bool:
return (isinstance(n1, Var)
and n1.from_module_getattr
and isinstance(n2, Var)
and n2.from_module_getattr
and n1.fullname() == n2.fullname())
def dummy_context() -> Context:
return TempNode(AnyType(TypeOfAny.special_form))
def is_valid_replacement(old: SymbolTableNode, new: SymbolTableNode) -> bool:
if isinstance(old.node, PlaceholderNode):
if isinstance(new.node, PlaceholderNode):
return not old.node.becomes_typeinfo and new.node.becomes_typeinfo
else:
return True
return False
def is_same_symbol(a: Optional[SymbolNode], b: Optional[SymbolNode]) -> bool:
return (a == b
or (isinstance(a, PlaceholderNode)
and isinstance(b, PlaceholderNode))
or is_same_var_from_getattr(a, b))
| true
| true
|
790c0d2f6f5179f121fbf4b29a5fd0f930a25a99
| 2,958
|
py
|
Python
|
create_pictures.py
|
Vantoine2019/PCBS_experience_subitizing
|
55bdbfdce4d53f71572ad4afc3942f0e8f84dd66
|
[
"MIT"
] | null | null | null |
create_pictures.py
|
Vantoine2019/PCBS_experience_subitizing
|
55bdbfdce4d53f71572ad4afc3942f0e8f84dd66
|
[
"MIT"
] | null | null | null |
create_pictures.py
|
Vantoine2019/PCBS_experience_subitizing
|
55bdbfdce4d53f71572ad4afc3942f0e8f84dd66
|
[
"MIT"
] | null | null | null |
"""
Création des images pour la tâche de détermination numérique
(pour évaluer l'impact de la configuration sur le subitizing)
Victor ANTOINE - victor.antoine@ens.fr
"""
import pygame
from random import sample
from numpy import random, sort
from os import path
from itertools import product
W, H = 960, 540
pygame.init()
screen = pygame.display.set_mode((W, H), pygame.DOUBLEBUF)
screen.fill((0, 0, 0))
# creation of the pictures with a random layout
origin_x, origin_y = random.randint(50, 910), random.randint(50, 490)
list_coord_random_x = list_coord_random_y = []
def create_liste_coord_random(axe, origin):
coord1 = coord2 = origin
liste = []
liste.append(origin)
while coord1 <= axe - 160:
coord1 += 80
liste.append(coord1)
while coord2 >= 110:
coord2 -= 80
liste.append(coord2)
liste = list(sort(liste))
return liste
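# Illustrative example (values assume an origin of 410 on the 960-pixel axis):
# the function returns the sorted 80-pixel grid
# [90, 170, 250, 330, 410, 490, 570, 650, 730, 810],
# grown in both directions from the random origin.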
list_coord_random_x = create_liste_coord_random(W, origin_x)
list_coord_random_y = create_liste_coord_random(H, origin_y)
system_coord_random = list(product(list_coord_random_x, list_coord_random_y))
for version in list(range(1, 11)):
for points_number in list(range(1, 11)):
screen.fill((0, 0, 0))
for (x, y) in sample(system_coord_random, points_number):
pygame.draw.circle(screen, (255, 255, 255), (x, y), 30, 0)
pygame.image.save(screen, path.join("pictures", "random", \
str(points_number) + "_" + str(version) + ".png"))
# creation of the pictures with a configurational layout
def create_slot_coord_config(top, left):
liste_coord = []
for position in [(1, 1), (3, 1), (2, 2), (1, 3), (3, 3)]:
liste_coord.append((top + position[0] * ((W - 270)/8),\
left + position[1] * ((H - 270)/4)))
return liste_coord
coord_left_side = create_slot_coord_config(130, 130)
coord_mid_side = create_slot_coord_config(303, 130)
coord_right_side = create_slot_coord_config(475, 130)
system_coord_config = []
position = [[2], [1, 3], [1, 2, 3], [0, 1, 3, 4], [0, 1, 2, 3, 4]]
for number in range(1, 11):
list_coord = []
if number <= 5:
positions = position[number-1]
for circle in positions:
list_coord.append(coord_mid_side[circle])
system_coord_config.append(list_coord)
else:
for circle in position[4]:
list_coord.append(coord_left_side[circle])
positions = position[number-6]
for circle in positions:
list_coord.append(coord_right_side[circle])
system_coord_config.append(list_coord)
number_index = 1
for number in system_coord_config:
screen.fill((0, 0, 0))
for (x, y) in number:
pygame.draw.circle(screen, (255, 255, 255), (int(x), int(y)), 30, 0)
pygame.image.save(screen, path.join("pictures", "config", \
str(number_index) + ".png"))
number_index += 1
| 32.505495
| 80
| 0.64165
|
import pygame
from random import sample
from numpy import random, sort
from os import path
from itertools import product
W, H = 960, 540
pygame.init()
screen = pygame.display.set_mode((W, H), pygame.DOUBLEBUF)
screen.fill((0, 0, 0))
origin_x, origin_y = random.randint(50, 910), random.randint(50, 490)
list_coord_random_x = list_coord_random_y = []
def create_liste_coord_random(axe, origin):
coord1 = coord2 = origin
liste = []
liste.append(origin)
while coord1 <= axe - 160:
coord1 += 80
liste.append(coord1)
while coord2 >= 110:
coord2 -= 80
liste.append(coord2)
liste = list(sort(liste))
return liste
list_coord_random_x = create_liste_coord_random(W, origin_x)
list_coord_random_y = create_liste_coord_random(H, origin_y)
system_coord_random = list(product(list_coord_random_x, list_coord_random_y))
for version in list(range(1, 11)):
for points_number in list(range(1, 11)):
screen.fill((0, 0, 0))
for (x, y) in sample(system_coord_random, points_number):
pygame.draw.circle(screen, (255, 255, 255), (x, y), 30, 0)
pygame.image.save(screen, path.join("pictures", "random", \
str(points_number) + "_" + str(version) + ".png"))
def create_slot_coord_config(top, left):
liste_coord = []
for position in [(1, 1), (3, 1), (2, 2), (1, 3), (3, 3)]:
liste_coord.append((top + position[0] * ((W - 270)/8),\
left + position[1] * ((H - 270)/4)))
return liste_coord
coord_left_side = create_slot_coord_config(130, 130)
coord_mid_side = create_slot_coord_config(303, 130)
coord_right_side = create_slot_coord_config(475, 130)
system_coord_config = []
position = [[2], [1, 3], [1, 2, 3], [0, 1, 3, 4], [0, 1, 2, 3, 4]]
for number in range(1, 11):
list_coord = []
if number <= 5:
positions = position[number-1]
for circle in positions:
list_coord.append(coord_mid_side[circle])
system_coord_config.append(list_coord)
else:
for circle in position[4]:
list_coord.append(coord_left_side[circle])
positions = position[number-6]
for circle in positions:
list_coord.append(coord_right_side[circle])
system_coord_config.append(list_coord)
number_index = 1
for number in system_coord_config:
screen.fill((0, 0, 0))
for (x, y) in number:
pygame.draw.circle(screen, (255, 255, 255), (int(x), int(y)), 30, 0)
pygame.image.save(screen, path.join("pictures", "config", \
str(number_index) + ".png"))
number_index += 1
| true
| true
|
790c0d3627cb91fcd7991bf3021f1ae1e5773ae4
| 1,716
|
py
|
Python
|
django_school/classroom/urls.py
|
mauriciovieira/django-schools
|
487a1533afb643b6b3c81353cbc080ba430a77e3
|
[
"MIT"
] | null | null | null |
django_school/classroom/urls.py
|
mauriciovieira/django-schools
|
487a1533afb643b6b3c81353cbc080ba430a77e3
|
[
"MIT"
] | null | null | null |
django_school/classroom/urls.py
|
mauriciovieira/django-schools
|
487a1533afb643b6b3c81353cbc080ba430a77e3
|
[
"MIT"
] | null | null | null |
from django.urls import include, path
from rest_framework import routers
from .views import classroom, students, teachers, quizzes
urlpatterns = [
path('', classroom.home, name='home'),
path('quizzes/', quizzes.QuizViewSet.as_view({'get': 'list'}), name='quizzes_list'),
path('students/', include(([
path('', students.QuizListView.as_view(), name='quiz_list'),
path('s/', students.StudentList.as_view(), name='student_list'),
path('interests/', students.StudentInterestsView.as_view(), name='student_interests'),
path('taken/', students.TakenQuizListView.as_view(), name='taken_quiz_list'),
path('quiz/<int:pk>/', students.take_quiz, name='take_quiz'),
path('quiz/<int:pk>/studentresults/', students.QuizResultsView.as_view(), name='student_quiz_results'),
], 'classroom'), namespace='students')),
path('teachers/', include(([
path('', teachers.QuizListView.as_view(), name='quiz_change_list'),
path('quiz/add/', teachers.QuizCreateView.as_view(), name='quiz_add'),
path('quiz/<int:pk>/', teachers.QuizUpdateView.as_view(), name='quiz_change'),
path('quiz/<int:pk>/delete/', teachers.QuizDeleteView.as_view(), name='quiz_delete'),
path('quiz/<int:pk>/results/', teachers.QuizResultsView.as_view(), name='quiz_results'),
path('quiz/<int:pk>/question/add/', teachers.question_add, name='question_add'),
path('quiz/<int:quiz_pk>/question/<int:question_pk>/', teachers.question_change, name='question_change'),
path('quiz/<int:quiz_pk>/question/<int:question_pk>/delete/', teachers.QuestionDeleteView.as_view(), name='question_delete'),
], 'classroom'), namespace='teachers')),
]
| 57.2
| 133
| 0.68007
|
from django.urls import include, path
from rest_framework import routers
from .views import classroom, students, teachers, quizzes
urlpatterns = [
path('', classroom.home, name='home'),
path('quizzes/', quizzes.QuizViewSet.as_view({'get': 'list'}), name='quizzes_list'),
path('students/', include(([
path('', students.QuizListView.as_view(), name='quiz_list'),
path('s/', students.StudentList.as_view(), name='student_list'),
path('interests/', students.StudentInterestsView.as_view(), name='student_interests'),
path('taken/', students.TakenQuizListView.as_view(), name='taken_quiz_list'),
path('quiz/<int:pk>/', students.take_quiz, name='take_quiz'),
path('quiz/<int:pk>/studentresults/', students.QuizResultsView.as_view(), name='student_quiz_results'),
], 'classroom'), namespace='students')),
path('teachers/', include(([
path('', teachers.QuizListView.as_view(), name='quiz_change_list'),
path('quiz/add/', teachers.QuizCreateView.as_view(), name='quiz_add'),
path('quiz/<int:pk>/', teachers.QuizUpdateView.as_view(), name='quiz_change'),
path('quiz/<int:pk>/delete/', teachers.QuizDeleteView.as_view(), name='quiz_delete'),
path('quiz/<int:pk>/results/', teachers.QuizResultsView.as_view(), name='quiz_results'),
path('quiz/<int:pk>/question/add/', teachers.question_add, name='question_add'),
path('quiz/<int:quiz_pk>/question/<int:question_pk>/', teachers.question_change, name='question_change'),
path('quiz/<int:quiz_pk>/question/<int:question_pk>/delete/', teachers.QuestionDeleteView.as_view(), name='question_delete'),
], 'classroom'), namespace='teachers')),
]
| true
| true
|
790c0d61ff781be5b0ba77576bf031c256c978bf
| 575
|
py
|
Python
|
membership/migrations/0007_auto_20151011_2109.py
|
jlaunonen/turska
|
fc6ec4e0ae50a823e931152ce8835098b96f5966
|
[
"CC-BY-3.0"
] | null | null | null |
membership/migrations/0007_auto_20151011_2109.py
|
jlaunonen/turska
|
fc6ec4e0ae50a823e931152ce8835098b96f5966
|
[
"CC-BY-3.0"
] | null | null | null |
membership/migrations/0007_auto_20151011_2109.py
|
jlaunonen/turska
|
fc6ec4e0ae50a823e931152ce8835098b96f5966
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('membership', '0006_auto_20151011_2005'),
]
operations = [
migrations.AlterField(
model_name='membership',
name='state',
field=models.CharField(max_length=10, verbose_name='Tila', choices=[('approval', 'Odottaa hyv\xe4ksynt\xe4\xe4'), ('in_effect', 'Voimassa'), ('discharged', 'Erotettu')]),
preserve_default=True,
),
]
| 27.380952
| 182
| 0.624348
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('membership', '0006_auto_20151011_2005'),
]
operations = [
migrations.AlterField(
model_name='membership',
name='state',
field=models.CharField(max_length=10, verbose_name='Tila', choices=[('approval', 'Odottaa hyv\xe4ksynt\xe4\xe4'), ('in_effect', 'Voimassa'), ('discharged', 'Erotettu')]),
preserve_default=True,
),
]
| true
| true
|
790c0eb70d2a829a17249a8fc860cb888ab38b1c
| 2,911
|
py
|
Python
|
bigmart.py
|
MayurJ20/bigmart
|
be0aab9908f9f4f3701b57936bfc2fe91c10eaca
|
[
"MIT"
] | null | null | null |
bigmart.py
|
MayurJ20/bigmart
|
be0aab9908f9f4f3701b57936bfc2fe91c10eaca
|
[
"MIT"
] | null | null | null |
bigmart.py
|
MayurJ20/bigmart
|
be0aab9908f9f4f3701b57936bfc2fe91c10eaca
|
[
"MIT"
] | 6
|
2020-11-29T17:21:25.000Z
|
2020-11-30T18:22:51.000Z
|
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('Train.csv')
# check for categorical attributes
cat_col = []
for x in df.dtypes.index:
if df.dtypes[x] == 'object':
cat_col.append(x)
cat_col.remove('Item_Identifier')
cat_col.remove('Outlet_Identifier')
item_weight_mean = df.pivot_table(values = "Item_Weight", index = 'Item_Identifier')
miss_bool = df['Item_Weight'].isnull()
for i, item in enumerate(df['Item_Identifier']):
if miss_bool[i]:
        if item in item_weight_mean.index:
df['Item_Weight'][i] = item_weight_mean.loc[item]['Item_Weight']
else:
df['Item_Weight'][i] = np.mean(df['Item_Weight'])
outlet_size_mode = df.pivot_table(values='Outlet_Size', columns='Outlet_Type', aggfunc=(lambda x: x.mode()[0]))
miss_bool = df['Outlet_Size'].isnull()
df.loc[miss_bool, 'Outlet_Size'] = df.loc[miss_bool, 'Outlet_Type'].apply(lambda x: outlet_size_mode[x])
# replace zeros with mean
df.loc[:, 'Item_Visibility'].replace([0], [df['Item_Visibility'].mean()], inplace=True)
# combine item fat content
df['Item_Fat_Content'] = df['Item_Fat_Content'].replace({'LF':'Low Fat', 'reg':'Regular', 'low fat':'Low Fat'})
df['Item_Fat_Content'].value_counts()
#Creation of New Attributes
df['New_Item_Type'] = df['Item_Identifier'].apply(lambda x: x[:2])
df['New_Item_Type'] = df['New_Item_Type'].map({'FD':'Food', 'NC':'Non-Consumable', 'DR':'Drinks'})
df.loc[df['New_Item_Type']=='Non-Consumable', 'Item_Fat_Content'] = 'Non-Edible'
# create small values for establishment year
df['Outlet_Years'] = 2013 - df['Outlet_Establishment_Year']
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df['Outlet'] = le.fit_transform(df['Outlet_Identifier'])
cat_col = ['Item_Fat_Content', 'Item_Type', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type', 'New_Item_Type']
for col in cat_col:
df[col] = le.fit_transform(df[col])
#Input Split
X = df.drop(columns=['Outlet_Establishment_Year', 'Item_Identifier', 'Outlet_Identifier', 'Item_Outlet_Sales'])
Y = df['Item_Outlet_Sales']
#Model Training
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
def train(model, X, Y):
# train the model
model.fit(X, Y)
# predict the training set
pred = model.predict(X)
# perform cross-validation
cv_score = cross_val_score(model, X, Y, scoring='neg_mean_squared_error', cv=5)
cv_score = np.abs(np.mean(cv_score))
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
train(model, X, Y)
coef = pd.Series(model.feature_importances_, X.columns).sort_values(ascending=False)
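# Hypothetical add-on (not part of the original script): report the
# cross-validated RMSE, which is easier to interpret than the raw MSE.
# All names used below already exist in this script.
cv_rmse = np.sqrt(np.abs(np.mean(
    cross_val_score(model, X, Y, scoring='neg_mean_squared_error', cv=5))))
print('CV RMSE: {:.2f}'.format(cv_rmse))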
#dump information to a file, closing it cleanly afterwards
with open('model.pkl', 'wb') as file:
    pickle.dump(model, file)
| 38.302632
| 115
| 0.707317
|
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('Train.csv')
cat_col = []
for x in df.dtypes.index:
if df.dtypes[x] == 'object':
cat_col.append(x)
cat_col.remove('Item_Identifier')
cat_col.remove('Outlet_Identifier')
item_weight_mean = df.pivot_table(values = "Item_Weight", index = 'Item_Identifier')
miss_bool = df['Item_Weight'].isnull()
for i, item in enumerate(df['Item_Identifier']):
if miss_bool[i]:
        if item in item_weight_mean.index:
df['Item_Weight'][i] = item_weight_mean.loc[item]['Item_Weight']
else:
df['Item_Weight'][i] = np.mean(df['Item_Weight'])
outlet_size_mode = df.pivot_table(values='Outlet_Size', columns='Outlet_Type', aggfunc=(lambda x: x.mode()[0]))
miss_bool = df['Outlet_Size'].isnull()
df.loc[miss_bool, 'Outlet_Size'] = df.loc[miss_bool, 'Outlet_Type'].apply(lambda x: outlet_size_mode[x])
df.loc[:, 'Item_Visibility'].replace([0], [df['Item_Visibility'].mean()], inplace=True)
df['Item_Fat_Content'] = df['Item_Fat_Content'].replace({'LF':'Low Fat', 'reg':'Regular', 'low fat':'Low Fat'})
df['Item_Fat_Content'].value_counts()
df['New_Item_Type'] = df['Item_Identifier'].apply(lambda x: x[:2])
df['New_Item_Type'] = df['New_Item_Type'].map({'FD':'Food', 'NC':'Non-Consumable', 'DR':'Drinks'})
df.loc[df['New_Item_Type']=='Non-Consumable', 'Item_Fat_Content'] = 'Non-Edible'
df['Outlet_Years'] = 2013 - df['Outlet_Establishment_Year']
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df['Outlet'] = le.fit_transform(df['Outlet_Identifier'])
cat_col = ['Item_Fat_Content', 'Item_Type', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type', 'New_Item_Type']
for col in cat_col:
df[col] = le.fit_transform(df[col])
X = df.drop(columns=['Outlet_Establishment_Year', 'Item_Identifier', 'Outlet_Identifier', 'Item_Outlet_Sales'])
Y = df['Item_Outlet_Sales']
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
def train(model, X, Y):
model.fit(X, Y)
pred = model.predict(X)
cv_score = cross_val_score(model, X, Y, scoring='neg_mean_squared_error', cv=5)
cv_score = np.abs(np.mean(cv_score))
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
train(model, X, Y)
coef = pd.Series(model.feature_importances_, X.columns).sort_values(ascending=False)
with open('model.pkl', 'wb') as file:
    pickle.dump(model, file)
| true
| true
|
790c0f2a585ba2c2c078955f4565812d71fde4e8
| 715
|
py
|
Python
|
dpnode/dpn/data/migrations/0003_auto_20141117_2011.py
|
APTrust/EarthDiver
|
b894d4f5b3781b34e650ba6162b402b1c477da30
|
[
"Apache-2.0"
] | null | null | null |
dpnode/dpn/data/migrations/0003_auto_20141117_2011.py
|
APTrust/EarthDiver
|
b894d4f5b3781b34e650ba6162b402b1c477da30
|
[
"Apache-2.0"
] | null | null | null |
dpnode/dpn/data/migrations/0003_auto_20141117_2011.py
|
APTrust/EarthDiver
|
b894d4f5b3781b34e650ba6162b402b1c477da30
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('data', '0002_auto_20141114_1935'),
]
operations = [
migrations.RemoveField(
model_name='registryentry',
name='fixity_algorithm',
),
migrations.RemoveField(
model_name='registryentry',
name='fixity_value',
),
migrations.RemoveField(
model_name='registryentry',
name='last_fixity_date',
),
migrations.RemoveField(
model_name='registryentry',
name='published',
),
]
| 23.064516
| 44
| 0.569231
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('data', '0002_auto_20141114_1935'),
]
operations = [
migrations.RemoveField(
model_name='registryentry',
name='fixity_algorithm',
),
migrations.RemoveField(
model_name='registryentry',
name='fixity_value',
),
migrations.RemoveField(
model_name='registryentry',
name='last_fixity_date',
),
migrations.RemoveField(
model_name='registryentry',
name='published',
),
]
| true
| true
|
790c10f392bbf70b34f97632db87e63480e309d9
| 6,194
|
py
|
Python
|
rasa/data.py
|
wtoalabi/rasa
|
1106845b5628dc1f739a09f75270926b572af918
|
[
"Apache-2.0"
] | 2
|
2021-10-31T01:06:08.000Z
|
2021-11-08T09:43:23.000Z
|
rasa/data.py
|
alfredfrancis/rasa
|
d8d226408f20cc2563c3aefbccef3e364a447666
|
[
"Apache-2.0"
] | 56
|
2020-06-09T00:16:14.000Z
|
2020-11-16T00:25:20.000Z
|
rasa/data.py
|
alfredfrancis/rasa
|
d8d226408f20cc2563c3aefbccef3e364a447666
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import shutil
import tempfile
import uuid
import re
from typing import Tuple, List, Text, Set, Union, Optional, Iterable
from rasa.nlu.training_data import loading
from rasa.utils.io import DEFAULT_ENCODING
logger = logging.getLogger(__name__)
def get_core_directory(paths: Optional[Union[Text, List[Text]]],) -> Text:
"""Recursively collects all Core training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Path to temporary directory containing all found Core training files.
"""
core_files, _ = get_core_nlu_files(paths)
return _copy_files_to_new_dir(core_files)
def get_nlu_directory(paths: Optional[Union[Text, List[Text]]],) -> Text:
"""Recursively collects all NLU training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Path to temporary directory containing all found NLU training files.
"""
_, nlu_files = get_core_nlu_files(paths)
return _copy_files_to_new_dir(nlu_files)
def get_core_nlu_directories(
paths: Optional[Union[Text, List[Text]]],
) -> Tuple[Text, Text]:
"""Recursively collects all training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Path to directory containing the Core files and path to directory
containing the NLU training files.
"""
story_files, nlu_data_files = get_core_nlu_files(paths)
story_directory = _copy_files_to_new_dir(story_files)
nlu_directory = _copy_files_to_new_dir(nlu_data_files)
return story_directory, nlu_directory
def get_core_nlu_files(
paths: Optional[Union[Text, List[Text]]]
) -> Tuple[List[Text], List[Text]]:
"""Recursively collects all training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Tuple of paths to story and NLU files.
"""
story_files = set()
nlu_data_files = set()
if paths is None:
paths = []
elif isinstance(paths, str):
paths = [paths]
for path in set(paths):
if not path:
continue
if _is_valid_filetype(path):
if is_nlu_file(path):
nlu_data_files.add(os.path.abspath(path))
elif is_story_file(path):
story_files.add(os.path.abspath(path))
else:
new_story_files, new_nlu_data_files = _find_core_nlu_files_in_directory(
path
)
story_files.update(new_story_files)
nlu_data_files.update(new_nlu_data_files)
return sorted(story_files), sorted(nlu_data_files)
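# Usage sketch (illustrative, with hypothetical paths):
#     story_files, nlu_files = get_core_nlu_files(["data/", "extra_stories.md"])
# returns two sorted lists of absolute paths: the story files on the left and
# the Markdown/JSON NLU files on the right.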
def _find_core_nlu_files_in_directory(directory: Text,) -> Tuple[Set[Text], Set[Text]]:
story_files = set()
nlu_data_files = set()
for root, _, files in os.walk(directory, followlinks=True):
# we sort the files here to ensure consistent order for repeatable training results
for f in sorted(files):
full_path = os.path.join(root, f)
if not _is_valid_filetype(full_path):
continue
if is_nlu_file(full_path):
nlu_data_files.add(full_path)
elif is_story_file(full_path):
story_files.add(full_path)
return story_files, nlu_data_files
def _is_valid_filetype(path: Text) -> bool:
is_file = os.path.isfile(path)
is_datafile = path.endswith(".json") or path.endswith(".md")
return is_file and is_datafile
def is_nlu_file(file_path: Text) -> bool:
"""Checks if a file is a Rasa compatible nlu file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a nlu file, otherwise `False`.
"""
return loading.guess_format(file_path) != loading.UNK
def is_story_file(file_path: Text) -> bool:
"""Checks if a file is a Rasa story file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a story file, otherwise `False`.
"""
if not file_path.endswith(".md"):
return False
try:
with open(
file_path, encoding=DEFAULT_ENCODING, errors="surrogateescape"
) as lines:
return any(_contains_story_pattern(line) for line in lines)
except Exception as e:
# catch-all because we might be loading files we are not expecting to load
logger.error(
f"Tried to check if '{file_path}' is a story file, but failed to "
f"read it. If this file contains story data, you should "
f"investigate this error, otherwise it is probably best to "
f"move the file to a different location. "
f"Error: {e}"
)
return False
def _contains_story_pattern(text: Text) -> bool:
story_pattern = r".*##.+"
return re.match(story_pattern, text) is not None
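# Examples (illustrative): lines such as '## my story' or 'text ## story'
# match the pattern above, while a bare '##' with nothing after it does not.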
def is_domain_file(file_path: Text) -> bool:
"""Checks whether the given file path is a Rasa domain file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a domain file, otherwise `False`.
"""
file_name = os.path.basename(file_path)
return file_name in ["domain.yml", "domain.yaml"]
def is_config_file(file_path: Text) -> bool:
"""Checks whether the given file path is a Rasa config file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a Rasa config file, otherwise `False`.
"""
file_name = os.path.basename(file_path)
return file_name in ["config.yml", "config.yaml"]
def _copy_files_to_new_dir(files: Iterable[Text]) -> Text:
directory = tempfile.mkdtemp()
for f in files:
# makes sure files do not overwrite each other, hence the prefix
unique_prefix = uuid.uuid4().hex
unique_file_name = unique_prefix + "_" + os.path.basename(f)
shutil.copy2(f, os.path.join(directory, unique_file_name))
return directory
| 28.809302
| 91
| 0.658541
|
import logging
import os
import shutil
import tempfile
import uuid
import re
from typing import Tuple, List, Text, Set, Union, Optional, Iterable
from rasa.nlu.training_data import loading
from rasa.utils.io import DEFAULT_ENCODING
logger = logging.getLogger(__name__)
def get_core_directory(paths: Optional[Union[Text, List[Text]]],) -> Text:
core_files, _ = get_core_nlu_files(paths)
return _copy_files_to_new_dir(core_files)
def get_nlu_directory(paths: Optional[Union[Text, List[Text]]],) -> Text:
_, nlu_files = get_core_nlu_files(paths)
return _copy_files_to_new_dir(nlu_files)
def get_core_nlu_directories(
paths: Optional[Union[Text, List[Text]]],
) -> Tuple[Text, Text]:
story_files, nlu_data_files = get_core_nlu_files(paths)
story_directory = _copy_files_to_new_dir(story_files)
nlu_directory = _copy_files_to_new_dir(nlu_data_files)
return story_directory, nlu_directory
def get_core_nlu_files(
paths: Optional[Union[Text, List[Text]]]
) -> Tuple[List[Text], List[Text]]:
story_files = set()
nlu_data_files = set()
if paths is None:
paths = []
elif isinstance(paths, str):
paths = [paths]
for path in set(paths):
if not path:
continue
if _is_valid_filetype(path):
if is_nlu_file(path):
nlu_data_files.add(os.path.abspath(path))
elif is_story_file(path):
story_files.add(os.path.abspath(path))
else:
new_story_files, new_nlu_data_files = _find_core_nlu_files_in_directory(
path
)
story_files.update(new_story_files)
nlu_data_files.update(new_nlu_data_files)
return sorted(story_files), sorted(nlu_data_files)
def _find_core_nlu_files_in_directory(directory: Text,) -> Tuple[Set[Text], Set[Text]]:
story_files = set()
nlu_data_files = set()
for root, _, files in os.walk(directory, followlinks=True):
for f in sorted(files):
full_path = os.path.join(root, f)
if not _is_valid_filetype(full_path):
continue
if is_nlu_file(full_path):
nlu_data_files.add(full_path)
elif is_story_file(full_path):
story_files.add(full_path)
return story_files, nlu_data_files
def _is_valid_filetype(path: Text) -> bool:
is_file = os.path.isfile(path)
is_datafile = path.endswith(".json") or path.endswith(".md")
return is_file and is_datafile
def is_nlu_file(file_path: Text) -> bool:
return loading.guess_format(file_path) != loading.UNK
def is_story_file(file_path: Text) -> bool:
if not file_path.endswith(".md"):
return False
try:
with open(
file_path, encoding=DEFAULT_ENCODING, errors="surrogateescape"
) as lines:
return any(_contains_story_pattern(line) for line in lines)
except Exception as e:
logger.error(
f"Tried to check if '{file_path}' is a story file, but failed to "
f"read it. If this file contains story data, you should "
f"investigate this error, otherwise it is probably best to "
f"move the file to a different location. "
f"Error: {e}"
)
return False
def _contains_story_pattern(text: Text) -> bool:
story_pattern = r".*##.+"
return re.match(story_pattern, text) is not None
def is_domain_file(file_path: Text) -> bool:
file_name = os.path.basename(file_path)
return file_name in ["domain.yml", "domain.yaml"]
def is_config_file(file_path: Text) -> bool:
file_name = os.path.basename(file_path)
return file_name in ["config.yml", "config.yaml"]
def _copy_files_to_new_dir(files: Iterable[Text]) -> Text:
directory = tempfile.mkdtemp()
for f in files:
unique_prefix = uuid.uuid4().hex
unique_file_name = unique_prefix + "_" + os.path.basename(f)
shutil.copy2(f, os.path.join(directory, unique_file_name))
return directory
| true
| true
|
790c10f6ca40fb1e2cd380a5f521e54a031d3132
| 1,362
|
py
|
Python
|
submission_builder.py
|
TrpFrog/jellyfish-aquarium
|
52be3065a94a0b28696e6532983250725b372302
|
[
"MIT"
] | 1
|
2021-08-22T16:21:35.000Z
|
2021-08-22T16:21:35.000Z
|
submission_builder.py
|
TrpFrog/jellyfish-aquarium
|
52be3065a94a0b28696e6532983250725b372302
|
[
"MIT"
] | null | null | null |
submission_builder.py
|
TrpFrog/jellyfish-aquarium
|
52be3065a94a0b28696e6532983250725b372302
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import shutil
def make_empty_folder(folder_path:str):
if os.path.exists(folder_path):
if os.path.isdir(folder_path):
shutil.rmtree(folder_path)
else:
os.remove(folder_path)
os.mkdir(folder_path)
def copy_files(from_path:str, to_path:str, extension:str):
files = os.listdir(from_path)
for file in files:
if file.endswith(extension):
shutil.copy(from_path + '/' + file, to_path)
def append_src(to_file, from_file: str):
with open(from_file, 'r') as f:
for line in f:
to_file.write(line)
if __name__ == '__main__':
project_name = 'viscomp_final'
source_folder_name = 'JellyfishAquarium'
src_folder = './' + source_folder_name
out_folder = './' + project_name
make_empty_folder(out_folder)
for extension in ['png', 'jpg']:
copy_files(src_folder, out_folder, extension)
# combine all pde files into viscomp_final.pde
with open(f'{out_folder}/{project_name}.pde', 'w') as f:
append_src(f, f'{src_folder}/{source_folder_name}.pde')
files = os.listdir(src_folder)
for file in files:
if file.endswith('.pde') and file != f'{source_folder_name}.pde':
f.write('\n\n')
append_src(f, src_folder + '/' + file)
| 28.375
| 77
| 0.618943
|
import os
import shutil
def make_empty_folder(folder_path:str):
if os.path.exists(folder_path):
if os.path.isdir(folder_path):
shutil.rmtree(folder_path)
else:
os.remove(folder_path)
os.mkdir(folder_path)
def copy_files(from_path:str, to_path:str, extension:str):
files = os.listdir(from_path)
for file in files:
if file.endswith(extension):
shutil.copy(from_path + '/' + file, to_path)
def append_src(to_file, from_file: str):
with open(from_file, 'r') as f:
for line in f:
to_file.write(line)
if __name__ == '__main__':
project_name = 'viscomp_final'
source_folder_name = 'JellyfishAquarium'
src_folder = './' + source_folder_name
out_folder = './' + project_name
make_empty_folder(out_folder)
for extension in ['png', 'jpg']:
copy_files(src_folder, out_folder, extension)
with open(f'{out_folder}/{project_name}.pde', 'w') as f:
append_src(f, f'{src_folder}/{source_folder_name}.pde')
files = os.listdir(src_folder)
for file in files:
if file.endswith('.pde') and file != f'{source_folder_name}.pde':
f.write('\n\n')
append_src(f, src_folder + '/' + file)
| true
| true
|
790c10f98771cbe0b5628f249e002c7b6d3624cd
| 25,367
|
py
|
Python
|
tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
from io import IOBase
import networkx as nx
import numpy as np
from openvino.tools.mo.ops.elementwise import Mul
from openvino.tools.mo.ops.split import AttributedVariadicSplit
from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import add_outputs_identity
from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, find_next_component, get_name_from_path, \
find_end_of_component, end_of_nnet_tag, read_binary_integer32_token, get_parameters, read_token_value, \
collect_until_token, collect_until_token_and_read, create_edge_attrs, get_args_for_specifier
from openvino.tools.mo.front.kaldi.utils import read_binary_vector
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.const import Const
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.utils import refer_to_faq_msg
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
"""
Load ParallelComponent of the Kaldi model.
ParallelComponent contains parallel nested networks.
VariadicSplit is inserted before nested networks.
Outputs of nested networks concatenate with layer Concat.
:param file_descr: descriptor of the model file
:param graph: graph with the topology.
:param prev_layer_id: id of the input layers for parallel component layer
:return: id of the concat layer - last layer of the parallel component layers
"""
nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
log.debug('Model contains parallel component with {} nested networks'.format(nnet_count))
split_points = []
outputs = []
inputs = []
for i in range(nnet_count):
read_token_value(file_descr, b'<NestedNnet>')
collect_until_token(file_descr, b'<Nnet>')
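        # load each nested network into a temporary graph, then merge its nodes and edges into the main graph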
g = Graph()
load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))
        # the input to nnet1 models has rank 1, but we also insert batch_size at the 0th axis;
        # the 1st axis holds the input_size of the nested subnetwork
        # we split the input from the main network across the subnetworks
input_node = Node(g, 'Parameter')
split_points.append(input_node['shape'][1])
g.remove_node(input_node.id)
mapping = {node: graph.unique_id(node) for node in g.nodes(data=False) if node in graph}
g = nx.relabel_nodes(g, mapping)
for val in mapping.values():
g.node[val]['name'] = val
graph.add_nodes_from(g.nodes(data=True))
graph.add_edges_from(g.edges(data=True))
sorted_nodes = tuple(nx.topological_sort(g))
outputs.append(Node(graph, sorted_nodes[-1]))
inputs.append(Node(graph, sorted_nodes[0]))
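    # insert a VariadicSplit so each nested subnetwork receives its own slice of the main network's input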
split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
attrs = {'out_ports_count': nnet_count, 'size_splits': split_points, 'axis': 1, 'name': split_id}
variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
prev_layer_node = Node(graph, prev_layer_id)
prev_layer_node.add_output_port(0)
graph.create_edge(prev_layer_node, variadic_split_node, 0, 0, create_edge_attrs(prev_layer_id, variadic_split_node.id, prev_layer_id))
concat_id = graph.unique_id(prefix='Concat')
graph.add_node(concat_id, parameters=None, op='concat', kind='op')
concat_node = Node(graph, concat_id)
# Connect each output of variadic_split_node to each subnetwork's inputs in ParallelComponent
# and each subnetwork's output to concat_node
for i, (input_node, output_node) in enumerate(zip(inputs, outputs)):
output_node.add_output_port(0)
concat_node.add_input_port(i)
graph.create_edge(output_node, concat_node, 0, i, create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
graph.create_edge(variadic_split_node, input_node, i, 0, create_edge_attrs(variadic_split_node.id, input_node.id, variadic_split_node.id, 0, i))
return concat_id
def load_kaldi_model(graph, nnet_path):
"""
Structure of the file is the following:
magic-number(16896)<Nnet> <Next Layer Name> weights etc.
:param nnet_path:
:return:
"""
nnet_name = None
if isinstance(nnet_path, str):
file_desc = open(nnet_path, "rb")
nnet_name = get_name_from_path(nnet_path)
elif isinstance(nnet_path, IOBase):
file_desc = nnet_path
else:
raise Error('Unsupported type of Kaldi model')
tag = find_next_tag(file_desc)
# start new model / submodel
if tag == '<Nnet>':
load_function = load_kalid_nnet1_model
elif tag == '<TransitionModel>':
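        # skip the TransitionModel section until an <Nnet> or <Nnet3> tag is found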
while tag != '<Nnet>' and tag != '<Nnet3>':
tag = find_next_tag(file_desc)
if tag == '<Nnet3>':
load_function = load_kaldi_nnet3_model
else:
load_function = load_kalid_nnet2_model
elif tag == '<Nnet3>':
load_function = load_kaldi_nnet3_model
else:
raise Error('Kaldi model should start with <Nnet> or <TransitionModel> tag. ',
refer_to_faq_msg(89))
read_placeholder(file_desc, 1)
return load_function(graph, file_desc, nnet_name)
def load_kalid_nnet1_model(graph, file_descr, name):
prev_layer_id = 'Parameter'
graph.add_node(prev_layer_id, name=prev_layer_id, kind='op', op='Parameter', parameters=None)
# find out output layer, it can be only one due to chain structure of nnet1 model
output_layer = None
while True:
component_type = find_next_component(file_descr)
if component_type == end_of_nnet_tag.lower()[1:-1]:
break
layer_o = read_binary_integer32_token(file_descr)
layer_i = read_binary_integer32_token(file_descr)
if component_type == 'parallelcomponent':
prev_layer_id = load_parallel_component(file_descr, graph, prev_layer_id)
find_end_of_component(file_descr, component_type)
continue
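        # record the byte range of this component's parameters within the file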
start_index = file_descr.tell()
end_tag, end_index = find_end_of_component(file_descr, component_type)
end_index -= len(end_tag)
layer_id = graph.unique_id(prefix=component_type)
graph.add_node(layer_id,
parameters=get_parameters(file_descr, start_index, end_index),
op=component_type,
kind='op',
layer_i=layer_i,
layer_o=layer_o)
if hasattr(graph, 'op_names_statistic'):
graph.op_names_statistic[component_type] += 1
prev_node = Node(graph, prev_layer_id)
if prev_node.op == 'Parameter':
prev_node['shape'] = int64_array([1, layer_i])
prev_node.add_output_port(0)
Node(graph, layer_id).add_input_port(0)
graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
prev_layer_id = layer_id
output_layer = layer_id
log.debug('{} (type is {}) was loaded'.format(prev_layer_id, component_type))
    # Tensor name information corresponding to a node is stored on its outgoing edges.
    # As output nodes do not have outgoing edges, fake outputs are required. In the code below,
    # an Identity node is added for each output, and the tensor name for the output is kept
    # on the (output, fake output) edge. After the Result-nodes-adding transformation, the fake
    # outputs are deleted from the graph.
assert output_layer is not None, "Output layer is not found in graph"
add_outputs_identity(graph, [output_layer], lambda g, output, fake_output: g.create_edge(
Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output)))
def load_kalid_nnet2_model(graph, file_descr, nnet_name):
input_name = 'Input'
graph.add_node(input_name, name=input_name, kind='op', op='Parameter', parameters=None, shape=None)
prev_layer_id = input_name
all_components = load_components(file_descr, graph)
used_layers = set()
for layer_id in all_components:
prev_node = Node(graph, prev_layer_id)
if prev_node.op == 'Parameter':
parameters = Node(graph, layer_id).parameters
input_dim = read_token_value(parameters, b'<InputDim>')
prev_node['shape'] = int64_array([1, input_dim])
prev_node.add_output_port(0)
Node(graph, layer_id).add_input_port(0)
graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
used_layers.add(prev_layer_id)
prev_layer_id = layer_id
log.debug('{} and {} were connected'.format(prev_layer_id, layer_id))
    # Tensor name information corresponding to a node is stored on its outgoing edges.
    # As output nodes do not have outgoing edges, fake outputs are required. In the code below,
    # an Identity node is added for each output, and the tensor name for the output is kept
    # on the (output, fake output) edge. After the Result-nodes-adding transformation, the fake
    # outputs are deleted from the graph.
output_layers = graph.nodes - used_layers
add_outputs_identity(graph, output_layers, lambda g, output, fake_output: g.create_edge(
Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output)))
def load_kaldi_nnet3_model(graph, file_descr, nnet_name):
file_descr.read(1)
component_layer_map = load_topology_map(file_descr, graph)
    # add information for shape calculation of MemoryOffset:
    # the shape of a MemoryOffset can't be taken from its previous layer because
    # the node is split into 2 parts to remove the cycle from the graph
for node in graph.get_op_nodes(**{'op': 'Parameter'}):
for o_n_name, params in node.get_outputs():
o_n = Node(graph, o_n_name)
if o_n['op'] == 'MemoryOffset':
# don't take batch from Parameter, it will be overwritten
# take only second dimension because we have only 2 dimensions
o_n['parameters']['element_size'] = int64_array([1, node.shape[1]])
load_components(file_descr, graph, component_layer_map)
load_priors(file_descr, graph)
def load_priors(file_descr, graph):
try:
collect_until_token(file_descr, b'<Priors>')
except Error:
# just ignore if priors were not found
return
if graph.graph['cmd_params'].counts is not None:
graph.graph['priors'] = read_binary_vector(file_descr)
else:
log.error("Model contains Prior values, if you want to embed them into the generated IR add option --counts=\"\" to command line",
extra={'is_warning': True})
def load_components(file_descr, graph, component_layer_map=None):
num_components = collect_until_token_and_read(file_descr, b'<NumComponents>')
log.debug('Network contains {} components'.format(num_components))
    is_nnet3 = component_layer_map is not None
if not is_nnet3:
collect_until_token(file_descr, b'<Components>')
all_components = list()
name = ""
for _ in range(num_components):
if is_nnet3:
name = collect_until_token_and_read(file_descr, b'<ComponentName>', np.string_)
component_type = find_next_component(file_descr)
if component_type == end_of_nnet_tag.lower()[1:-1]:
break
start_index = file_descr.tell()
end_tag, end_index = find_end_of_component(file_descr, component_type)
        # read dim info where possible to simplify shape calculation for MemoryOffset:
        # the shape of a MemoryOffset can't be taken from its previous layer because
        # the node is split into 2 parts to remove the cycle from the graph
file_descr.seek(start_index)
dim = 0
dim_words = {b'<Dim>', b'<InputDim>'}
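        # probe only within this component's byte range; rewind to start_index on a miss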
for dim_word in dim_words:
try:
collect_until_token(file_descr, dim_word, size_search_zone=end_index - start_index)
cur_index = file_descr.tell()
if start_index < cur_index < end_index:
dim = read_binary_integer32_token(file_descr)
break
else:
file_descr.seek(start_index)
except Error:
file_descr.seek(start_index)
if is_nnet3:
if name in component_layer_map:
layer_id = component_layer_map[name][0]
for layer in component_layer_map[name]:
node = Node(graph, layer)
node['parameters'] = get_parameters(file_descr, start_index, end_index)
node['op'] = component_type
# Read dim info where possible to simplify shape calculation for MemoryOffset
for o_n_name, params in node.get_outputs():
o_n = Node(graph, o_n_name)
if o_n['op'] == 'MemoryOffset' and dim != 0:
o_n['parameters']['element_size'] = int64_array([1, dim])
else:
raise Error("Something wrong with layer {}".format(name))
else:
layer_id = graph.unique_id(prefix=component_type)
graph.add_node(layer_id,
parameters=get_parameters(file_descr, start_index, end_index),
op=component_type,
kind='op')
if hasattr(graph, 'op_names_statistic'):
graph.op_names_statistic[component_type] += 1
all_components.append(layer_id)
log.debug('{} (type is {}) was loaded'.format(layer_id, component_type))
return all_components
def load_topology_map(file_descr, graph):
not_finished = True
component_layer_map = {}
layer_node_map = {}
while not_finished:
not_finished = read_node(file_descr, graph, component_layer_map, layer_node_map)
return component_layer_map
def read_node(file_descr, graph, component_layer_map, layer_node_map):
s = file_descr.readline()
if s == b'\n':
return False
tokens = s.split(b' ')
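    # each topology line declares one node: input-node, component-node, output-node or dim-range-node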
if tokens[0] == b'input-node':
in_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
in_name = str(in_name).strip('b').replace('\'', "")
in_shape = mo_array([1, s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]], dtype=np.int)
if in_name not in layer_node_map:
graph.add_node(in_name, name=in_name, kind='op', op='Parameter', parameters=None, shape=in_shape)
layer_node_map[in_name] = in_name
else:
Node(graph, in_name)['op'] = 'Parameter'
Node(graph, in_name)['shape'] = in_shape
elif tokens[0] == b'component-node':
layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
layer_name = str(layer_name).strip('b').replace('\'', "")
component_name = s[s.find(b'component=') + len(b'component='):].split(b' ')[0]
if layer_name not in layer_node_map:
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters=None,
op=None,
kind='op')
layer_node_map[layer_name] = node_name
else:
node_name = layer_node_map[layer_name]
if component_name in component_layer_map:
component_layer_map[component_name].append(node_name)
else:
component_layer_map[component_name] = [node_name]
# parse input
in_node_id = parse_input_for_node(s[s.find(b'input=') + 6:], graph, layer_node_map)
        # don't create a cyclic edge from a node to itself, to avoid having to remove it later
if in_node_id != node_name:
out_port = len(Node(graph, in_node_id).out_nodes())
in_port = len(Node(graph, node_name).in_nodes())
Node(graph, node_name).add_input_port(in_port)
Node(graph, in_node_id).add_output_port(out_port, skip_if_exist=True)
graph.add_edge(in_node_id, node_name, **create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))
elif tokens[0] == b'output-node':
layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
layer_name = str(layer_name).strip('b').replace('\'', "")
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters=None,
op='Identity',
kind='op')
out_name = graph.unique_id(prefix=node_name + "_out")
graph.add_node(out_name,
parameters=None,
op='Result',
kind='op')
Node(graph, node_name).add_input_port(0)
Node(graph, node_name).add_output_port(0)
Node(graph, out_name).add_input_port(0)
graph.add_edge(node_name, out_name, **create_edge_attrs(node_name, out_name, node_name))
# parse input
in_node_id = parse_input_for_node(s[s.find(b'input=') + len(b'input='):], graph, layer_node_map)
out_port = len(Node(graph, in_node_id).out_nodes())
Node(graph, in_node_id).add_output_port(out_port)
graph.create_edge(Node(graph, in_node_id), Node(graph, node_name), out_port, 0, create_edge_attrs(in_node_id, node_name, in_node_id, 0, out_port))
objective_type = s[s.find(b'objective=') + 10:].split(b' ')[0].split(b'\n')[0]
if objective_type != b'linear':
raise Error("Unsupported objective-type for output {}".format(node_name))
elif tokens[0] == b'dim-range-node':
layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
layer_name = str(layer_name).strip('b').replace('\'', "")
offset = int(s[s.find(b'dim-offset=') + len(b'dim-offset='):].split(b' ')[0])
dim = int(s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0])
if layer_name in layer_node_map:
node_name = layer_node_map[layer_name]
node = Node(graph, node_name)
node['parameters'] = {'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])}
node['op'] = 'Crop'
else:
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters={'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])},
op='Crop',
kind='op')
layer_node_map[layer_name] = node_name
node = Node(graph, node_name)
in_node_id = parse_input_for_node(s[s.find(b'input-node=') + len(b'input-node='):], graph, layer_node_map)
out_port = len(Node(graph, in_node_id).out_nodes())
in_port = len(Node(graph, node_name).in_nodes())
node.add_input_port(in_port)
Node(graph, in_node_id).add_output_port(out_port)
graph.create_edge(Node(graph, in_node_id), node, out_port, in_port, create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))
        # read dim info where possible to simplify shape calculation for MemoryOffset:
        # the shape of a MemoryOffset can't be taken from its previous layer because
        # the node is split into 2 parts to remove the cycle from the graph
for o_n_name, params in node.get_outputs():
o_n = Node(graph, o_n_name)
if o_n['op'] == 'MemoryOffset':
o_n['parameters']['element_size'] = int64_array([1, dim])
else:
raise Error("Unsupported node specifier {}".format(tokens[0]))
return True
def parse_input_for_node(string, graph, component_layer_map):
return parse_specifier(string, graph, component_layer_map)
def parse_specifier(string, graph, layer_node_map):
pos = string.find(b'(')
if pos == -1:
# node name
input_name = str(string.split(b' ')[0]).strip('b').replace("\'", '').replace('\\n', '')
if input_name not in layer_node_map:
node_name = graph.unique_id(prefix=input_name)
graph.add_node(node_name, parameters=[], op="", kind='op')
layer_node_map[input_name] = node_name
else:
node_name = layer_node_map[input_name]
return node_name
spec = string[:pos]
args = get_args_for_specifier(string[pos:])
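    # recursively expand Kaldi descriptor specifiers: Append, Offset, Sum, IfDefined, ReplaceIndex, Scale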
if spec == b'Append':
nodes = []
for i in range(len(args)):
nodes.append(parse_specifier(args[i], graph, layer_node_map))
layer_name = 'Append_'
for node in nodes:
layer_name = layer_name + node + "_"
if layer_name not in layer_node_map:
concat_name = graph.unique_id(prefix=layer_name)
graph.add_node(concat_name,
parameters=None,
op='concat',
kind='op')
layer_node_map[layer_name] = concat_name
i = 0
Node(graph, concat_name).add_sequence_of_ports('in', range(len(nodes)))
for node in nodes:
out_port = len(Node(graph, node).out_nodes())
Node(graph, node).add_output_port(out_port)
graph.create_edge(Node(graph, node), Node(graph, concat_name), out_port, i, create_edge_attrs(node, concat_name, node, i, out_port))
i = i + 1
else:
concat_name = layer_node_map[layer_name]
return concat_name
elif spec == b'Offset':
node = parse_specifier(args[0], graph, layer_node_map)
t = int(args[1])
if len(args) > 2:
raise Error("ModelOptimizer supports only 2 arguments for Offset")
layer_name = 'Offset_' + node + '_'
if t < 0:
layer_name = layer_name + '_' + str(-t)
else:
layer_name = layer_name + str(t)
if layer_name not in layer_node_map:
memory_name = graph.unique_id(prefix=layer_name)
layer_node_map[layer_name] = memory_name
memory_name_2 = memory_name + '_out'
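            # a MemoryOffset is paired with a '<name>_out' counterpart so the cycle can later be removed from the graph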
graph.add_node(memory_name,
parameters=dict(t=t, pair_name=memory_name_2, has_default=False),
op='MemoryOffset',
kind='op')
out_port = len(Node(graph, node).out_nodes())
in_port = len(Node(graph, memory_name).in_nodes())
Node(graph, memory_name).add_input_port(in_port)
Node(graph, node).add_output_port(out_port, skip_if_exist=True)
graph.create_edge(Node(graph, node), Node(graph, memory_name), out_port, in_port, create_edge_attrs(node, memory_name, node, in_port, out_port))
else:
memory_name = layer_node_map[layer_name]
return memory_name
elif spec == b'Sum':
nodes = []
for i in range(len(args)):
nodes.append(parse_specifier(args[i], graph, layer_node_map))
layer_name = 'Sum_'
for node in nodes:
layer_name = layer_name + node + "_"
if layer_name not in layer_node_map:
sum_name = graph.unique_id(prefix=layer_name)
graph.add_node(sum_name, parameters=None, op='Add', kind='op')
layer_node_map[layer_name] = sum_name
else:
sum_name = layer_node_map[layer_name]
for i, node in enumerate(nodes):
out_port = len(Node(graph, node).out_nodes())
Node(graph, node).add_output_port(out_port, skip_if_exist=True)
Node(graph, sum_name).add_input_port(i)
graph.add_edge(node, sum_name, **create_edge_attrs(node, sum_name, node, i))
return sum_name
elif spec == b'IfDefined':
node_id = parse_specifier(args[0], graph, layer_node_map)
node = Node(graph, node_id)
if node.op == 'MemoryOffset':
node['parameters']['has_default'] = True
return node_id
elif spec == b'ReplaceIndex':
node = parse_specifier(args[0], graph, layer_node_map)
return node
elif spec == b'Scale':
node_name = parse_specifier(args[1], graph, layer_node_map)
scale_value = float(args[0])
layer_name = '{}/Mul/{}'.format(node_name, scale_value)
if layer_name not in layer_node_map:
scale_name = graph.unique_id(prefix=layer_name)
scale_node = Mul(graph, {'name': scale_name}).create_node()
layer_node_map[layer_name] = scale_name
scale_const_name = 'Const_{}'.format(scale_value)
const_node = Const(graph, {'name': scale_const_name, 'value': float_array([scale_value])}).create_node()
node = Node(graph, node_name)
graph.create_edge(const_node, scale_node, 0, 0, create_edge_attrs(const_node.id, scale_node.id, const_node.id))
out_port = len(node.out_nodes())
graph.create_edge(node, scale_node, out_port, 1, create_edge_attrs(node_name, scale_node.id, node_name, 1, out_port))
else:
scale_name = layer_node_map[layer_name]
return scale_name
| 45.217469
| 156
| 0.643277
|
import logging as log
from io import IOBase
import networkx as nx
import numpy as np
from openvino.tools.mo.ops.elementwise import Mul
from openvino.tools.mo.ops.split import AttributedVariadicSplit
from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import add_outputs_identity
from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, find_next_component, get_name_from_path, \
find_end_of_component, end_of_nnet_tag, read_binary_integer32_token, get_parameters, read_token_value, \
collect_until_token, collect_until_token_and_read, create_edge_attrs, get_args_for_specifier
from openvino.tools.mo.front.kaldi.utils import read_binary_vector
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.const import Const
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.utils import refer_to_faq_msg
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
log.debug('Model contains parallel component with {} nested networks'.format(nnet_count))
split_points = []
outputs = []
inputs = []
for i in range(nnet_count):
read_token_value(file_descr, b'<NestedNnet>')
collect_until_token(file_descr, b'<Nnet>')
g = Graph()
load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))
input_node = Node(g, 'Parameter')
split_points.append(input_node['shape'][1])
g.remove_node(input_node.id)
mapping = {node: graph.unique_id(node) for node in g.nodes(data=False) if node in graph}
g = nx.relabel_nodes(g, mapping)
for val in mapping.values():
g.node[val]['name'] = val
graph.add_nodes_from(g.nodes(data=True))
graph.add_edges_from(g.edges(data=True))
sorted_nodes = tuple(nx.topological_sort(g))
outputs.append(Node(graph, sorted_nodes[-1]))
inputs.append(Node(graph, sorted_nodes[0]))
split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
attrs = {'out_ports_count': nnet_count, 'size_splits': split_points, 'axis': 1, 'name': split_id}
variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
prev_layer_node = Node(graph, prev_layer_id)
prev_layer_node.add_output_port(0)
graph.create_edge(prev_layer_node, variadic_split_node, 0, 0, create_edge_attrs(prev_layer_id, variadic_split_node.id, prev_layer_id))
concat_id = graph.unique_id(prefix='Concat')
graph.add_node(concat_id, parameters=None, op='concat', kind='op')
concat_node = Node(graph, concat_id)
# and each subnetwork's output to concat_node
for i, (input_node, output_node) in enumerate(zip(inputs, outputs)):
output_node.add_output_port(0)
concat_node.add_input_port(i)
graph.create_edge(output_node, concat_node, 0, i, create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
graph.create_edge(variadic_split_node, input_node, i, 0, create_edge_attrs(variadic_split_node.id, input_node.id, variadic_split_node.id, 0, i))
return concat_id
def load_kaldi_model(graph, nnet_path):
nnet_name = None
if isinstance(nnet_path, str):
file_desc = open(nnet_path, "rb")
nnet_name = get_name_from_path(nnet_path)
elif isinstance(nnet_path, IOBase):
file_desc = nnet_path
else:
raise Error('Unsupported type of Kaldi model')
tag = find_next_tag(file_desc)
if tag == '<Nnet>':
load_function = load_kalid_nnet1_model
elif tag == '<TransitionModel>':
while tag != '<Nnet>' and tag != '<Nnet3>':
tag = find_next_tag(file_desc)
if tag == '<Nnet3>':
load_function = load_kaldi_nnet3_model
else:
load_function = load_kalid_nnet2_model
elif tag == '<Nnet3>':
load_function = load_kaldi_nnet3_model
else:
raise Error('Kaldi model should start with <Nnet> or <TransitionModel> tag. ',
refer_to_faq_msg(89))
read_placeholder(file_desc, 1)
return load_function(graph, file_desc, nnet_name)
def load_kalid_nnet1_model(graph, file_descr, name):
prev_layer_id = 'Parameter'
graph.add_node(prev_layer_id, name=prev_layer_id, kind='op', op='Parameter', parameters=None)
output_layer = None
while True:
component_type = find_next_component(file_descr)
if component_type == end_of_nnet_tag.lower()[1:-1]:
break
layer_o = read_binary_integer32_token(file_descr)
layer_i = read_binary_integer32_token(file_descr)
if component_type == 'parallelcomponent':
prev_layer_id = load_parallel_component(file_descr, graph, prev_layer_id)
find_end_of_component(file_descr, component_type)
continue
start_index = file_descr.tell()
end_tag, end_index = find_end_of_component(file_descr, component_type)
end_index -= len(end_tag)
layer_id = graph.unique_id(prefix=component_type)
graph.add_node(layer_id,
parameters=get_parameters(file_descr, start_index, end_index),
op=component_type,
kind='op',
layer_i=layer_i,
layer_o=layer_o)
if hasattr(graph, 'op_names_statistic'):
graph.op_names_statistic[component_type] += 1
prev_node = Node(graph, prev_layer_id)
if prev_node.op == 'Parameter':
prev_node['shape'] = int64_array([1, layer_i])
prev_node.add_output_port(0)
Node(graph, layer_id).add_input_port(0)
graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
prev_layer_id = layer_id
output_layer = layer_id
log.debug('{} (type is {}) was loaded'.format(prev_layer_id, component_type))
assert output_layer is not None, "Output layer is not found in graph"
add_outputs_identity(graph, [output_layer], lambda g, output, fake_output: g.create_edge(
Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output)))
def load_kalid_nnet2_model(graph, file_descr, nnet_name):
input_name = 'Input'
graph.add_node(input_name, name=input_name, kind='op', op='Parameter', parameters=None, shape=None)
prev_layer_id = input_name
all_components = load_components(file_descr, graph)
used_layers = set()
for layer_id in all_components:
prev_node = Node(graph, prev_layer_id)
if prev_node.op == 'Parameter':
parameters = Node(graph, layer_id).parameters
input_dim = read_token_value(parameters, b'<InputDim>')
prev_node['shape'] = int64_array([1, input_dim])
prev_node.add_output_port(0)
Node(graph, layer_id).add_input_port(0)
graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
used_layers.add(prev_layer_id)
prev_layer_id = layer_id
log.debug('{} and {} were connected'.format(prev_layer_id, layer_id))
output_layers = graph.nodes - used_layers
add_outputs_identity(graph, output_layers, lambda g, output, fake_output: g.create_edge(
Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output)))
def load_kaldi_nnet3_model(graph, file_descr, nnet_name):
file_descr.read(1)
component_layer_map = load_topology_map(file_descr, graph)
# it is separated in 2 parts to remove cycle from graph
for node in graph.get_op_nodes(**{'op': 'Parameter'}):
for o_n_name, params in node.get_outputs():
o_n = Node(graph, o_n_name)
if o_n['op'] == 'MemoryOffset':
# don't take batch from Parameter, it will be overwritten
o_n['parameters']['element_size'] = int64_array([1, node.shape[1]])
load_components(file_descr, graph, component_layer_map)
load_priors(file_descr, graph)
def load_priors(file_descr, graph):
try:
collect_until_token(file_descr, b'<Priors>')
except Error:
return
if graph.graph['cmd_params'].counts is not None:
graph.graph['priors'] = read_binary_vector(file_descr)
else:
log.error("Model contains Prior values, if you want to embed them into the generated IR add option --counts=\"\" to command line",
extra={'is_warning': True})
def load_components(file_descr, graph, component_layer_map=None):
num_components = collect_until_token_and_read(file_descr, b'<NumComponents>')
log.debug('Network contains {} components'.format(num_components))
    is_nnet3 = component_layer_map is not None
if not is_nnet3:
collect_until_token(file_descr, b'<Components>')
all_components = list()
name = ""
for _ in range(num_components):
if is_nnet3:
name = collect_until_token_and_read(file_descr, b'<ComponentName>', np.string_)
component_type = find_next_component(file_descr)
if component_type == end_of_nnet_tag.lower()[1:-1]:
break
start_index = file_descr.tell()
end_tag, end_index = find_end_of_component(file_descr, component_type)
# it is separated in 2 parts to remove cycle from graph
file_descr.seek(start_index)
dim = 0
dim_words = {b'<Dim>', b'<InputDim>'}
for dim_word in dim_words:
try:
collect_until_token(file_descr, dim_word, size_search_zone=end_index - start_index)
cur_index = file_descr.tell()
if start_index < cur_index < end_index:
dim = read_binary_integer32_token(file_descr)
break
else:
file_descr.seek(start_index)
except Error:
file_descr.seek(start_index)
if is_nnet3:
if name in component_layer_map:
layer_id = component_layer_map[name][0]
for layer in component_layer_map[name]:
node = Node(graph, layer)
node['parameters'] = get_parameters(file_descr, start_index, end_index)
node['op'] = component_type
# Read dim info where possible to simplify shape calculation for MemoryOffset
for o_n_name, params in node.get_outputs():
o_n = Node(graph, o_n_name)
if o_n['op'] == 'MemoryOffset' and dim != 0:
o_n['parameters']['element_size'] = int64_array([1, dim])
else:
raise Error("Something wrong with layer {}".format(name))
else:
layer_id = graph.unique_id(prefix=component_type)
graph.add_node(layer_id,
parameters=get_parameters(file_descr, start_index, end_index),
op=component_type,
kind='op')
if hasattr(graph, 'op_names_statistic'):
graph.op_names_statistic[component_type] += 1
all_components.append(layer_id)
log.debug('{} (type is {}) was loaded'.format(layer_id, component_type))
return all_components
def load_topology_map(file_descr, graph):
not_finished = True
component_layer_map = {}
layer_node_map = {}
while not_finished:
not_finished = read_node(file_descr, graph, component_layer_map, layer_node_map)
return component_layer_map
def read_node(file_descr, graph, component_layer_map, layer_node_map):
s = file_descr.readline()
if s == b'\n':
return False
tokens = s.split(b' ')
if tokens[0] == b'input-node':
in_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
in_name = str(in_name).strip('b').replace('\'', "")
in_shape = mo_array([1, s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]], dtype=np.int)
if in_name not in layer_node_map:
graph.add_node(in_name, name=in_name, kind='op', op='Parameter', parameters=None, shape=in_shape)
layer_node_map[in_name] = in_name
else:
Node(graph, in_name)['op'] = 'Parameter'
Node(graph, in_name)['shape'] = in_shape
elif tokens[0] == b'component-node':
layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
layer_name = str(layer_name).strip('b').replace('\'', "")
component_name = s[s.find(b'component=') + len(b'component='):].split(b' ')[0]
if layer_name not in layer_node_map:
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters=None,
op=None,
kind='op')
layer_node_map[layer_name] = node_name
else:
node_name = layer_node_map[layer_name]
if component_name in component_layer_map:
component_layer_map[component_name].append(node_name)
else:
component_layer_map[component_name] = [node_name]
# parse input
in_node_id = parse_input_for_node(s[s.find(b'input=') + 6:], graph, layer_node_map)
# don't create cyclic edges node to itself to avoid removing later
if in_node_id != node_name:
out_port = len(Node(graph, in_node_id).out_nodes())
in_port = len(Node(graph, node_name).in_nodes())
Node(graph, node_name).add_input_port(in_port)
Node(graph, in_node_id).add_output_port(out_port, skip_if_exist=True)
graph.add_edge(in_node_id, node_name, **create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))
elif tokens[0] == b'output-node':
layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
layer_name = str(layer_name).strip('b').replace('\'', "")
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters=None,
op='Identity',
kind='op')
out_name = graph.unique_id(prefix=node_name + "_out")
graph.add_node(out_name,
parameters=None,
op='Result',
kind='op')
Node(graph, node_name).add_input_port(0)
Node(graph, node_name).add_output_port(0)
Node(graph, out_name).add_input_port(0)
graph.add_edge(node_name, out_name, **create_edge_attrs(node_name, out_name, node_name))
# parse input
in_node_id = parse_input_for_node(s[s.find(b'input=') + len(b'input='):], graph, layer_node_map)
out_port = len(Node(graph, in_node_id).out_nodes())
Node(graph, in_node_id).add_output_port(out_port)
graph.create_edge(Node(graph, in_node_id), Node(graph, node_name), out_port, 0, create_edge_attrs(in_node_id, node_name, in_node_id, 0, out_port))
objective_type = s[s.find(b'objective=') + 10:].split(b' ')[0].split(b'\n')[0]
if objective_type != b'linear':
raise Error("Unsupported objective-type for output {}".format(node_name))
elif tokens[0] == b'dim-range-node':
layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
layer_name = str(layer_name).strip('b').replace('\'', "")
offset = int(s[s.find(b'dim-offset=') + len(b'dim-offset='):].split(b' ')[0])
dim = int(s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0])
if layer_name in layer_node_map:
node_name = layer_node_map[layer_name]
node = Node(graph, node_name)
node['parameters'] = {'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])}
node['op'] = 'Crop'
else:
node_name = graph.unique_id(prefix=layer_name)
graph.add_node(node_name,
parameters={'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])},
op='Crop',
kind='op')
layer_node_map[layer_name] = node_name
node = Node(graph, node_name)
in_node_id = parse_input_for_node(s[s.find(b'input-node=') + len(b'input-node='):], graph, layer_node_map)
out_port = len(Node(graph, in_node_id).out_nodes())
in_port = len(Node(graph, node_name).in_nodes())
node.add_input_port(in_port)
Node(graph, in_node_id).add_output_port(out_port)
graph.create_edge(Node(graph, in_node_id), node, out_port, in_port, create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))
# it is separated in 2 parts to remove cycle from graph
for o_n_name, params in node.get_outputs():
o_n = Node(graph, o_n_name)
if o_n['op'] == 'MemoryOffset':
o_n['parameters']['element_size'] = int64_array([1, dim])
else:
raise Error("Unsupported node specifier {}".format(tokens[0]))
return True
def parse_input_for_node(string, graph, component_layer_map):
return parse_specifier(string, graph, component_layer_map)
def parse_specifier(string, graph, layer_node_map):
pos = string.find(b'(')
if pos == -1:
# node name
input_name = str(string.split(b' ')[0]).strip('b').replace("\'", '').replace('\\n', '')
if input_name not in layer_node_map:
node_name = graph.unique_id(prefix=input_name)
graph.add_node(node_name, parameters=[], op="", kind='op')
layer_node_map[input_name] = node_name
else:
node_name = layer_node_map[input_name]
return node_name
spec = string[:pos]
args = get_args_for_specifier(string[pos:])
if spec == b'Append':
nodes = []
for i in range(len(args)):
nodes.append(parse_specifier(args[i], graph, layer_node_map))
layer_name = 'Append_'
for node in nodes:
layer_name = layer_name + node + "_"
if layer_name not in layer_node_map:
concat_name = graph.unique_id(prefix=layer_name)
graph.add_node(concat_name,
parameters=None,
op='concat',
kind='op')
layer_node_map[layer_name] = concat_name
i = 0
Node(graph, concat_name).add_sequence_of_ports('in', range(len(nodes)))
for node in nodes:
out_port = len(Node(graph, node).out_nodes())
Node(graph, node).add_output_port(out_port)
graph.create_edge(Node(graph, node), Node(graph, concat_name), out_port, i, create_edge_attrs(node, concat_name, node, i, out_port))
i = i + 1
else:
concat_name = layer_node_map[layer_name]
return concat_name
elif spec == b'Offset':
node = parse_specifier(args[0], graph, layer_node_map)
t = int(args[1])
if len(args) > 2:
raise Error("ModelOptimizer supports only 2 arguments for Offset")
layer_name = 'Offset_' + node + '_'
if t < 0:
layer_name = layer_name + '_' + str(-t)
else:
layer_name = layer_name + str(t)
if layer_name not in layer_node_map:
memory_name = graph.unique_id(prefix=layer_name)
layer_node_map[layer_name] = memory_name
memory_name_2 = memory_name + '_out'
graph.add_node(memory_name,
parameters=dict(t=t, pair_name=memory_name_2, has_default=False),
op='MemoryOffset',
kind='op')
out_port = len(Node(graph, node).out_nodes())
in_port = len(Node(graph, memory_name).in_nodes())
Node(graph, memory_name).add_input_port(in_port)
Node(graph, node).add_output_port(out_port, skip_if_exist=True)
graph.create_edge(Node(graph, node), Node(graph, memory_name), out_port, in_port, create_edge_attrs(node, memory_name, node, in_port, out_port))
else:
memory_name = layer_node_map[layer_name]
return memory_name
elif spec == b'Sum':
nodes = []
for i in range(len(args)):
nodes.append(parse_specifier(args[i], graph, layer_node_map))
layer_name = 'Sum_'
for node in nodes:
layer_name = layer_name + node + "_"
if layer_name not in layer_node_map:
sum_name = graph.unique_id(prefix=layer_name)
graph.add_node(sum_name, parameters=None, op='Add', kind='op')
layer_node_map[layer_name] = sum_name
else:
sum_name = layer_node_map[layer_name]
for i, node in enumerate(nodes):
out_port = len(Node(graph, node).out_nodes())
Node(graph, node).add_output_port(out_port, skip_if_exist=True)
Node(graph, sum_name).add_input_port(i)
graph.add_edge(node, sum_name, **create_edge_attrs(node, sum_name, node, i))
return sum_name
elif spec == b'IfDefined':
node_id = parse_specifier(args[0], graph, layer_node_map)
node = Node(graph, node_id)
if node.op == 'MemoryOffset':
node['parameters']['has_default'] = True
return node_id
elif spec == b'ReplaceIndex':
node = parse_specifier(args[0], graph, layer_node_map)
return node
elif spec == b'Scale':
node_name = parse_specifier(args[1], graph, layer_node_map)
scale_value = float(args[0])
layer_name = '{}/Mul/{}'.format(node_name, scale_value)
if layer_name not in layer_node_map:
scale_name = graph.unique_id(prefix=layer_name)
scale_node = Mul(graph, {'name': scale_name}).create_node()
layer_node_map[layer_name] = scale_name
scale_const_name = 'Const_{}'.format(scale_value)
const_node = Const(graph, {'name': scale_const_name, 'value': float_array([scale_value])}).create_node()
node = Node(graph, node_name)
graph.create_edge(const_node, scale_node, 0, 0, create_edge_attrs(const_node.id, scale_node.id, const_node.id))
out_port = len(node.out_nodes())
graph.create_edge(node, scale_node, out_port, 1, create_edge_attrs(node_name, scale_node.id, node_name, 1, out_port))
else:
scale_name = layer_node_map[layer_name]
return scale_name
| true
| true
|
790c1130918c13ad40b2c0d03ce2b98cd1705e60
| 1,197
|
py
|
Python
|
app/__init__.py
|
YouKnowBagu/item-catalog
|
b7cf67da8141a8c865663083a7f62c9d28b433c0
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
YouKnowBagu/item-catalog
|
b7cf67da8141a8c865663083a7f62c9d28b433c0
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
YouKnowBagu/item-catalog
|
b7cf67da8141a8c865663083a7f62c9d28b433c0
|
[
"MIT"
] | null | null | null |
"""App initialization file. Instantiates app, database, login_manager. Registers view blueprints. Defines user_loader callback for LoginManager."""
from flask import Flask
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from database import init_db, session
from models import Base, Category, Item, User
from views.auth import authModule
from views.categories import categoryModule
from views.items import itemModule
from views.site import siteModule
login_manager = LoginManager()
app = Flask(__name__)
login_manager.init_app(app)
csrf = CSRFProtect(app)
init_db()
@login_manager.user_loader
def load_user(userid):
user = session.query(User).filter_by(id=userid).first()
print "Trying to load %s" % user
if user:
return user
else:
return None
@app.teardown_appcontext
def shutdown_session(exception=None):
session.remove()
app.register_blueprint(categoryModule)
app.register_blueprint(itemModule)
app.register_blueprint(authModule)
app.register_blueprint(siteModule)
| 25.468085
| 150
| 0.798663
|
"""App initialization file. Instantiates app, database, login_manager. Registers view blueprints. Defines user_loader callback for LoginManager."""
from flask import Flask
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from database import init_db, session
from models import Base, Category, Item, User
from views.auth import authModule
from views.categories import categoryModule
from views.items import itemModule
from views.site import siteModule
login_manager = LoginManager()
app = Flask(__name__)
login_manager.init_app(app)
csrf = CSRFProtect(app)
init_db()
@login_manager.user_loader
def load_user(userid):
user = session.query(User).filter_by(id=userid).first()
print "Trying to load %s" % user
if user:
return user
else:
return None
@app.teardown_appcontext
def shutdown_session(exception=None):
session.remove()
app.register_blueprint(categoryModule)
app.register_blueprint(itemModule)
app.register_blueprint(authModule)
app.register_blueprint(siteModule)
| false
| true
|
790c11bc5387a3937e676d63c9e3046fa73af5eb
| 3,753
|
py
|
Python
|
lib/includes/utility.py
|
vutriancode/mfea_autoscaling
|
9672ce16c8a4353e8234d536e35e0eb8d1b72673
|
[
"MIT"
] | null | null | null |
lib/includes/utility.py
|
vutriancode/mfea_autoscaling
|
9672ce16c8a4353e8234d536e35e0eb8d1b72673
|
[
"MIT"
] | null | null | null |
lib/includes/utility.py
|
vutriancode/mfea_autoscaling
|
9672ce16c8a4353e8234d536e35e0eb8d1b72673
|
[
"MIT"
] | null | null | null |
"""
Author: thangbk2209
Project: Autoscaling
Created: 3/15/19 16:48
Purpose:
"""
import random
import os
import matplotlib
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import tensorflow as tf
from config import *
def draw_time_series(data, title, x_label, y_label, file_name):
plt.plot(data)
plt.title(title)
plt.ylabel(y_label)
plt.xlabel(x_label)
# plt.legend([/], loc='upper left')
plt.savefig(file_name + '.png')
plt.show()
plt.close()
def get_scaler(scaler_method):
if scaler_method == 'min_max_scaler':
return MinMaxScaler(feature_range=(0, 1))
if scaler_method == 'standard_scaler':
return StandardScaler()
else:
print(f'|-> ERROR: Not support {scaler_method}')
def get_activation(activation_name):
if activation_name == 'sigmoid':
return tf.nn.sigmoid
elif activation_name == 'relu':
return tf.nn.relu
elif activation_name == 'tanh':
return tf.nn.tanh
elif activation_name == 'elu':
return tf.nn.elu
else:
print(">>> Can not apply your activation <<<")
def get_optimizer(optimizer_name, lr):
if optimizer_name == 'momentum':
return tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
elif optimizer_name == 'adam':
return tf.train.AdamOptimizer(learning_rate=lr)
elif optimizer_name == 'rmsprop':
return tf.train.RMSPropOptimizer(learning_rate=lr)
else:
print(">>> Can not apply your optimizer <<<")
def early_stopping_decision(array, patience):
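    # return False (i.e. stop) when all of the last `patience` values are worse (greater)
    # than the value just before them; assumes lower is better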
value = array[len(array) - patience - 1]
arr = array[len(array) - patience:]
check = 0
for val in arr:
if(val > value):
check += 1
if(check == patience):
return False
else:
return True
def draw_train_loss(loss_train, loss_valid, save_path):
plt.plot(loss_train)
plt.plot(loss_valid)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(save_path)
plt.close()
def average(arr):
return sum(arr) / len(arr)
def create_name(**kwargs):
    key = list(kwargs.keys())  # collect all keys from the kwargs dict
name = []
for _key in key:
value = str(kwargs[_key]).replace('[', '')
value = value.replace(']', '')
_name = f'{_key}_{value}'
name.append(_name)
return '-'.join(name)
def generate_units_size(network_size, layer_size):
assert network_size > 0, 'Network size invalid'
assert layer_size > 0, 'Layer size invalid'
num_units = []
for i in range(network_size):
# num_units.append(random.choice(range(1, layer_size, 1)))
num_units.append(int(layer_size))
if layer_size != 2:
layer_size /= 2
return num_units
def compute_scale_fitness_value(upper_prediction, lower_prediction, real_value):
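    # fraction of real values that fall inside the [lower, upper] prediction interval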
rate_real_value_in_prediction_interval = 0
num_sample = len(upper_prediction)
for i in range(num_sample):
_real_value = real_value[i][0]
lower_border = lower_prediction[i]
higher_border = upper_prediction[i]
if _real_value <= higher_border and _real_value >= lower_border:
rate_real_value_in_prediction_interval += 1 / num_sample
return rate_real_value_in_prediction_interval
def gen_folder_in_path(path):
path_component = path.split('/')
path_infor = ''
for _path_component in path_component:
path_infor += f'/{_path_component}'
if not os.path.exists(path_infor):
os.mkdir(path_infor)
assert os.path.exists(path_infor), f'Can not generate folder in path {path}'
| 26.244755
| 80
| 0.656541
|
import random
import os
import matplotlib
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import tensorflow as tf
from config import *
def draw_time_series(data, title, x_label, y_label, file_name):
plt.plot(data)
plt.title(title)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.savefig(file_name + '.png')
plt.show()
plt.close()
def get_scaler(scaler_method):
if scaler_method == 'min_max_scaler':
return MinMaxScaler(feature_range=(0, 1))
if scaler_method == 'standard_scaler':
return StandardScaler()
else:
print(f'|-> ERROR: Not support {scaler_method}')
def get_activation(activation_name):
if activation_name == 'sigmoid':
return tf.nn.sigmoid
elif activation_name == 'relu':
return tf.nn.relu
elif activation_name == 'tanh':
return tf.nn.tanh
elif activation_name == 'elu':
return tf.nn.elu
else:
print(">>> Can not apply your activation <<<")
def get_optimizer(optimizer_name, lr):
if optimizer_name == 'momentum':
return tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
elif optimizer_name == 'adam':
return tf.train.AdamOptimizer(learning_rate=lr)
elif optimizer_name == 'rmsprop':
return tf.train.RMSPropOptimizer(learning_rate=lr)
else:
print(">>> Can not apply your optimizer <<<")
def early_stopping_decision(array, patience):
value = array[len(array) - patience - 1]
arr = array[len(array) - patience:]
check = 0
for val in arr:
if(val > value):
check += 1
if(check == patience):
return False
else:
return True
def draw_train_loss(loss_train, loss_valid, save_path):
plt.plot(loss_train)
plt.plot(loss_valid)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(save_path)
plt.close()
def average(arr):
return sum(arr) / len(arr)
def create_name(**kwargs):
key = list(kwargs.keys())
name = []
for _key in key:
value = str(kwargs[_key]).replace('[', '')
value = value.replace(']', '')
_name = f'{_key}_{value}'
name.append(_name)
return '-'.join(name)
def generate_units_size(network_size, layer_size):
assert network_size > 0, 'Network size invalid'
assert layer_size > 0, 'Layer size invalid'
num_units = []
for i in range(network_size):
num_units.append(int(layer_size))
if layer_size != 2:
layer_size /= 2
return num_units
def compute_scale_fitness_value(upper_prediction, lower_prediction, real_value):
rate_real_value_in_prediction_interval = 0
num_sample = len(upper_prediction)
for i in range(num_sample):
_real_value = real_value[i][0]
lower_border = lower_prediction[i]
higher_border = upper_prediction[i]
if _real_value <= higher_border and _real_value >= lower_border:
rate_real_value_in_prediction_interval += 1 / num_sample
return rate_real_value_in_prediction_interval
def gen_folder_in_path(path):
path_component = path.split('/')
path_infor = ''
for _path_component in path_component:
path_infor += f'/{_path_component}'
if not os.path.exists(path_infor):
os.mkdir(path_infor)
assert os.path.exists(path_infor), f'Can not generate folder in path {path}'
| true
| true
|
790c11f71733998d3ddc7cbc0ff5addafb00e0cc
| 3,395
|
py
|
Python
|
commons.py
|
gmberton/CosPlace
|
0f03cc9fe25919c87627e92535f3693747617eae
|
[
"MIT"
] | null | null | null |
commons.py
|
gmberton/CosPlace
|
0f03cc9fe25919c87627e92535f3693747617eae
|
[
"MIT"
] | null | null | null |
commons.py
|
gmberton/CosPlace
|
0f03cc9fe25919c87627e92535f3693747617eae
|
[
"MIT"
] | null | null | null |
import torch
import random
import numpy as np
class InfiniteDataLoader(torch.utils.data.DataLoader):
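    """DataLoader that re-creates its iterator whenever it is exhausted, so iteration never stops."""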
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_iterator = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.dataset_iterator)
except StopIteration:
self.dataset_iterator = super().__iter__()
batch = next(self.dataset_iterator)
return batch
def make_deterministic(seed=0):
"""Make results deterministic. If seed == -1, do not make deterministic.
Running your script in a deterministic way might slow it down.
    Note that for some packages (e.g. sklearn's PCA) this function is not enough.
"""
seed = int(seed)
if seed == -1:
return
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def setup_logging(output_folder, exist_ok=False, console="debug",
info_filename="info.log", debug_filename="debug.log"):
"""Set up logging files and console output.
Creates one file for INFO logs and one for DEBUG logs.
Args:
        output_folder (str): folder where the log files are saved; created by this function
        exist_ok (boolean): if False, throw a FileExistsError if output_folder already exists
        console (str):
if == "debug" prints on console debug messages and higher
if == "info" prints on console info messages and higher
if == None does not use console (useful when a logger has already been set)
info_filename (str): the name of the info file. if None, don't create info file
debug_filename (str): the name of the debug file. if None, don't create debug file
"""
import os
import sys
import logging
import traceback
if not exist_ok and os.path.exists(output_folder):
raise FileExistsError(f"{output_folder} already exists!")
os.makedirs(output_folder, exist_ok=True)
base_formatter = logging.Formatter('%(asctime)s %(message)s', "%Y-%m-%d %H:%M:%S")
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
if info_filename != None:
info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(base_formatter)
logger.addHandler(info_file_handler)
if debug_filename != None:
debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(base_formatter)
logger.addHandler(debug_file_handler)
if console != None:
console_handler = logging.StreamHandler()
if console == "debug": console_handler.setLevel(logging.DEBUG)
if console == "info": console_handler.setLevel(logging.INFO)
console_handler.setFormatter(base_formatter)
logger.addHandler(console_handler)
def my_handler(type_, value, tb):
        logger.info("\n" + "".join(traceback.format_exception(type_, value, tb)))
logging.info("Experiment finished (with some errors)")
sys.excepthook = my_handler
| 38.146067
| 92
| 0.671281
|
import torch
import random
import numpy as np
class InfiniteDataLoader(torch.utils.data.DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_iterator = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.dataset_iterator)
except StopIteration:
self.dataset_iterator = super().__iter__()
batch = next(self.dataset_iterator)
return batch
def make_deterministic(seed=0):
seed = int(seed)
if seed == -1:
return
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def setup_logging(output_folder, exist_ok=False, console="debug",
info_filename="info.log", debug_filename="debug.log"):
import os
import sys
import logging
import traceback
if not exist_ok and os.path.exists(output_folder):
raise FileExistsError(f"{output_folder} already exists!")
os.makedirs(output_folder, exist_ok=True)
base_formatter = logging.Formatter('%(asctime)s %(message)s', "%Y-%m-%d %H:%M:%S")
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
if info_filename != None:
info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(base_formatter)
logger.addHandler(info_file_handler)
if debug_filename != None:
debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(base_formatter)
logger.addHandler(debug_file_handler)
if console != None:
console_handler = logging.StreamHandler()
if console == "debug": console_handler.setLevel(logging.DEBUG)
if console == "info": console_handler.setLevel(logging.INFO)
console_handler.setFormatter(base_formatter)
logger.addHandler(console_handler)
def my_handler(type_, value, tb):
        logger.info("\n" + "".join(traceback.format_exception(type_, value, tb)))
logging.info("Experiment finished (with some errors)")
sys.excepthook = my_handler
| true
| true
|
790c12b118c8b5fc677223e0eae5421ec3a2edb3
| 9,694
|
py
|
Python
|
policies/policies_core.py
|
eventbrite/britetech-casper-tools
|
2dd2d647368d3dcb05eefeb6dca20c1c3c7bfd05
|
[
"Apache-2.0"
] | null | null | null |
policies/policies_core.py
|
eventbrite/britetech-casper-tools
|
2dd2d647368d3dcb05eefeb6dca20c1c3c7bfd05
|
[
"Apache-2.0"
] | null | null | null |
policies/policies_core.py
|
eventbrite/britetech-casper-tools
|
2dd2d647368d3dcb05eefeb6dca20c1c3c7bfd05
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append('..')
from utilities import jamfconfig
from utilities import apirequests
from computergroups import computergroups
import xml.etree.ElementTree as etree
jss_api_base_url = jamfconfig.getJSS_API_URL()
#print("JSS API Base URL: {}".format(jss_api_base_url))
def cleanupOutput(inputString):
#print str(inputString)
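    # replace curly quotes with straight ASCII equivalents so the output prints cleanly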
return inputString.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u201c", "\"").replace(u"\u201d", "\"")
def getAllPolicies(username, password):
    ''' Print all policies in the JSS to the screen '''
#print(username)
print "We're Refactored! Getting All JAMF Policies..."
reqStr = jss_api_base_url + '/policies'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r == -1:
return
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
for policy in responseXml.findall('policy'):
policyName = policy.find('name').text
policyID = policy.find('id').text
print 'Policy ID: ' + policyID + ', ' + 'Policy Name: ' + policyName + '\n'
def getPolicybyId(policyid, username, password):
    ''' Search for a policy by its ID number and print its General Policy Information, Scope, and Package Configuration to stdout '''
print 'Running refactored getpolicybyid ...'
reqStr = jss_api_base_url + '/policies/id/' + policyid
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
general = responseXml.find('general')
## General Policy Information
name = general.find('name').text
policy_id = general.find('id').text
enabled = general.find('enabled').text
trigger = general.find('trigger').text
frequency = general.find('frequency').text
print '\nGENERAL POLICY INFORMATION: '
print 'Policy Name: ' + str(name)
print 'Policy ID #: ' + str(policy_id)
print 'Policy is Enabled: ' + str(enabled)
print 'Policy Trigger: ' + str(trigger)
print 'Policy Frequency: ' + str(frequency)
## Policy Scope Information
scope = responseXml.find('scope')
allcomputers = scope.find('all_computers').text
groups = scope.find('computer_groups')
comp_groups = []
computers = scope.find('computers')
members = []
## Add Header Row for output for info categories
# headerRow = "Computer Name, JSS ID"
# members += [ headerRow ]
for computer in computers.findall('computer'):
# compID = computer.find('id').text
name = computer.find('name').text
computerInfo = str(name)
computerInfo = cleanupOutput(computerInfo)
#print computerInfo.encode('ascii', 'ignore')
members += [ computerInfo ]
for g in groups.findall('computer_group'):
group_name = g.find('name').text
groupInfo = str(group_name)
comp_groups += [ groupInfo ]
print '\nPOLICY SCOPE INFORMATION:'
print 'Scoped to All Computers: ' + str(allcomputers)
print '\nCOMPUTER GROUPS IN SCOPE: '
print '\n'.join (sorted(comp_groups))
if members:
print '\nADDITIONAL COMPUTERS IN SCOPE: '
print '\n'.join (sorted(members))
print '\nTotal Computers in Scope: ' + str(len(members))
## Package Configuration Information
pkgconfig = responseXml.find('package_configuration')
packages = pkgconfig.find('packages')
pkgheaderRow = "Package Name"
pkglist = []
for pkg in packages.findall('package'):
pkg_name = pkg.find('name').text
pkg_action = pkg.find('action').text
pkgInfo = str(pkg_name) + ', ' + str(pkg_action)
pkgInfo = cleanupOutput(pkgInfo)
pkglist += [ pkgInfo ]
print '\nPACKAGE CONFIGURATION: '
print '\n'.join (sorted(pkglist))
else:
print 'Failed to find policy with ID ' + policyid
def listAllPolicies(username, password):
''' List all policies in JSS - for function use '''
reqStr = jss_api_base_url + '/policies'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r == -1:
return
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
PoliciesList = []
for policy in responseXml.findall('policy'):
policyName = policy.find('name').text
policyID = policy.find('id').text
PoliciesList.append({'name': policyName, 'id': policyID})
return PoliciesList
def listAllPolicyIds(username, password):
''' List all policy IDs in JSS - for function use - returns a list of Policy ID #s '''
reqStr = jss_api_base_url + '/policies'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r == -1:
return
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
PolicyIDList = []
for policy in responseXml.findall('policy'):
policyID = policy.find('id').text
PolicyIDList.append(policyID)
return PolicyIDList
def listPolicyStatusbyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return status results for
use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/General'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
general = responseXml.find('general')
status = general.find('enabled').text
return status
def listPolicyNamebyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return name for
use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/General'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
general = responseXml.find('general')
name = general.find('name').text
return name
def listPolicyScopebyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return scope details as a
dict for use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/Scope'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
scopeData = []
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
scope = responseXml.find('scope')
allcomputers = scope.find('all_computers').text
groups = scope.find('computer_groups')
comp_groups = []
comp_groupIDs = []
computers = scope.find('computers')
members = []
scope_details = {}
for comp in computers.findall('computer'):
if comp.find('name').text:
name = comp.find('name').text
members.append(name)
for g in groups.findall('computer_group'):
if g.find('name').text:
group_name = g.find('name').text
groupID = computergroups.getComputerGroupId(group_name, username, password)
comp_groups.append(group_name)
comp_groupIDs.append(groupID)
scope_details = { "Policy ID: ": policyid, "All computers?: ": allcomputers, "Computer groups: ": comp_groups, "Computer group IDs: ": comp_groupIDs, "Specific computers: ": members }
return scope_details
def listPolicyPackagesbyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return package details as a list
for use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/Packages'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
pkglist = []
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
pkgconfig = responseXml.find('package_configuration')
packages = pkgconfig.find('packages')
if packages.findall('package'):
for pkg in packages.findall('package'):
pkg_name = pkg.find('name').text
pkglist.append(pkg_name)
return pkglist
def listPolicybyId(policyid, username, password):
''' Method to search for Policy ID by ID number and return General Policy Information, Scoping Information, and Package Configuration information - for use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
policyDict = {}
## General Policy Information
general = responseXml.find('general')
polname = general.find('name').text
policy_id = general.find('id').text
enabled = general.find('enabled').text
trigger = general.find('trigger').text
frequency = general.find('frequency').text
## Policy Scope Information
scope = responseXml.find('scope')
allcomputers = scope.find('all_computers').text
groups = scope.find('computer_groups')
comp_groups = []
computers = scope.find('computers')
members = []
for computer in computers.findall('computer'):
name = computer.find('name').text
computerInfo = name.encode("utf-8")
# computerInfo = cleanupOutput(computerInfo)
# members.append(name)
members += [ computerInfo ]
for g in groups.findall('computer_group'):
group_name = g.find('name').text
groupInfo = str(group_name)
comp_groups += [ groupInfo ]
## Package Configuration Information
pkgconfig = responseXml.find('package_configuration')
packages = pkgconfig.find('packages')
pkglist = []
for pkg in packages.findall('package'):
pkg_name = pkg.find('name').text
pkg_action = pkg.find('action').text
pkglist.append({"Package Name": pkg_name, "Package Action": pkg_action})
## Add policy details to policyDict and return
policyDict = { "Policy Name": polname,
"Policy ID": policy_id,
"Policy Enabled": enabled,
"Policy Trigger": trigger,
"Policy Frequency": frequency,
"All Computers in Scope": allcomputers,
"Scoped Computers": members,
"Scoped Computer Groups": comp_groups,
"Package Configuration": pkglist
}
return policyDict
else:
print 'Failed to find policy with ID ' + policyid
| 28.345029
| 185
| 0.702909
|
import sys
sys.path.append('..')
from utilities import jamfconfig
from utilities import apirequests
from computergroups import computergroups
import xml.etree.ElementTree as etree
jss_api_base_url = jamfconfig.getJSS_API_URL()
def cleanupOutput(inputString):
return inputString.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u201c", "\"").replace(u"\u201d", "\"")
def getAllPolicies(username, password):
''' List all policies in JSS to screen '''
print "We're Refactored! Getting All JAMF Policies..."
reqStr = jss_api_base_url + '/policies'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r == -1:
return
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
for policy in responseXml.findall('policy'):
policyName = policy.find('name').text
policyID = policy.find('id').text
print 'Policy ID: ' + policyID + ', ' + 'Policy Name: ' + policyName + '\n'
def getPolicybyId(policyid, username, password):
''' Method to search for Policy ID by ID number and return General Policy Information, Scoping Information, and Package Configuration information - send results to Stdout '''
print 'Running refactored getpolicybyid ...'
reqStr = jss_api_base_url + '/policies/id/' + policyid
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
general = responseXml.find('general')
## General Policy Information
name = general.find('name').text
policy_id = general.find('id').text
enabled = general.find('enabled').text
trigger = general.find('trigger').text
frequency = general.find('frequency').text
print '\nGENERAL POLICY INFORMATION: '
print 'Policy Name: ' + str(name)
        print 'Policy ID #: ' + str(policy_id)
print 'Policy is Enabled: ' + str(enabled)
print 'Policy Trigger: ' + str(trigger)
print 'Policy Frequency: ' + str(frequency)
## Policy Scope Information
scope = responseXml.find('scope')
allcomputers = scope.find('all_computers').text
groups = scope.find('computer_groups')
comp_groups = []
computers = scope.find('computers')
members = []
## Add Header Row for output for info categories
# headerRow = "Computer Name, JSS ID"
# members += [ headerRow ]
for computer in computers.findall('computer'):
# compID = computer.find('id').text
name = computer.find('name').text
computerInfo = str(name)
computerInfo = cleanupOutput(computerInfo)
#print computerInfo.encode('ascii', 'ignore')
members += [ computerInfo ]
for g in groups.findall('computer_group'):
group_name = g.find('name').text
groupInfo = str(group_name)
comp_groups += [ groupInfo ]
print '\nPOLICY SCOPE INFORMATION:'
print 'Scoped to All Computers: ' + str(allcomputers)
print '\nCOMPUTER GROUPS IN SCOPE: '
print '\n'.join (sorted(comp_groups))
if members:
print '\nADDITIONAL COMPUTERS IN SCOPE: '
print '\n'.join (sorted(members))
print '\nTotal Computers in Scope: ' + str(len(members))
## Package Configuration Information
pkgconfig = responseXml.find('package_configuration')
packages = pkgconfig.find('packages')
pkgheaderRow = "Package Name"
pkglist = []
for pkg in packages.findall('package'):
pkg_name = pkg.find('name').text
pkg_action = pkg.find('action').text
pkgInfo = str(pkg_name) + ', ' + str(pkg_action)
pkgInfo = cleanupOutput(pkgInfo)
pkglist += [ pkgInfo ]
print '\nPACKAGE CONFIGURATION: '
print '\n'.join (sorted(pkglist))
else:
print 'Failed to find policy with ID ' + policyid
def listAllPolicies(username, password):
''' List all policies in JSS - for function use '''
reqStr = jss_api_base_url + '/policies'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r == -1:
return
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
PoliciesList = []
for policy in responseXml.findall('policy'):
policyName = policy.find('name').text
policyID = policy.find('id').text
PoliciesList.append({'name': policyName, 'id': policyID})
return PoliciesList
def listAllPolicyIds(username, password):
''' List all policy IDs in JSS - for function use - returns a list of Policy ID #s '''
reqStr = jss_api_base_url + '/policies'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r == -1:
return
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
PolicyIDList = []
for policy in responseXml.findall('policy'):
policyID = policy.find('id').text
PolicyIDList.append(policyID)
return PolicyIDList
def listPolicyStatusbyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return status results for
use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/General'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
general = responseXml.find('general')
status = general.find('enabled').text
return status
def listPolicyNamebyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return name for
use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/General'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
general = responseXml.find('general')
name = general.find('name').text
return name
def listPolicyScopebyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return scope details as a
dict for use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/Scope'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
scopeData = []
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
scope = responseXml.find('scope')
allcomputers = scope.find('all_computers').text
groups = scope.find('computer_groups')
comp_groups = []
comp_groupIDs = []
computers = scope.find('computers')
members = []
scope_details = {}
for comp in computers.findall('computer'):
if comp.find('name').text:
name = comp.find('name').text
members.append(name)
for g in groups.findall('computer_group'):
if g.find('name').text:
group_name = g.find('name').text
groupID = computergroups.getComputerGroupId(group_name, username, password)
comp_groups.append(group_name)
comp_groupIDs.append(groupID)
scope_details = { "Policy ID: ": policyid, "All computers?: ": allcomputers, "Computer groups: ": comp_groups, "Computer group IDs: ": comp_groupIDs, "Specific computers: ": members }
return scope_details
def listPolicyPackagesbyId(policyid, username, password):
''' Function to search for Policy ID by ID number and return package details as a list
for use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid + '/subset/Packages'
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
pkglist = []
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
pkgconfig = responseXml.find('package_configuration')
packages = pkgconfig.find('packages')
if packages.findall('package'):
for pkg in packages.findall('package'):
pkg_name = pkg.find('name').text
pkglist.append(pkg_name)
return pkglist
def listPolicybyId(policyid, username, password):
''' Method to search for Policy ID by ID number and return General Policy Information, Scoping Information, and Package Configuration information - for use in functions '''
reqStr = jss_api_base_url + '/policies/id/' + policyid
r = apirequests.sendAPIRequest(reqStr, username, password, 'GET')
if r != -1:
baseXml = r.read()
responseXml = etree.fromstring(baseXml)
policyDict = {}
## General Policy Information
general = responseXml.find('general')
polname = general.find('name').text
policy_id = general.find('id').text
enabled = general.find('enabled').text
trigger = general.find('trigger').text
frequency = general.find('frequency').text
## Policy Scope Information
scope = responseXml.find('scope')
allcomputers = scope.find('all_computers').text
groups = scope.find('computer_groups')
comp_groups = []
computers = scope.find('computers')
members = []
for computer in computers.findall('computer'):
name = computer.find('name').text
computerInfo = name.encode("utf-8")
# computerInfo = cleanupOutput(computerInfo)
# members.append(name)
members += [ computerInfo ]
for g in groups.findall('computer_group'):
group_name = g.find('name').text
groupInfo = str(group_name)
comp_groups += [ groupInfo ]
## Package Configuration Information
pkgconfig = responseXml.find('package_configuration')
packages = pkgconfig.find('packages')
pkglist = []
for pkg in packages.findall('package'):
pkg_name = pkg.find('name').text
pkg_action = pkg.find('action').text
pkglist.append({"Package Name": pkg_name, "Package Action": pkg_action})
## Add policy details to policyDict and return
policyDict = { "Policy Name": polname,
"Policy ID": policy_id,
"Policy Enabled": enabled,
"Policy Trigger": trigger,
"Policy Frequency": frequency,
"All Computers in Scope": allcomputers,
"Scoped Computers": members,
"Scoped Computer Groups": comp_groups,
"Package Configuration": pkglist
}
return policyDict
else:
print 'Failed to find policy with ID ' + policyid
| false
| true
|
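The record above pairs stdout-printing helpers (getAllPolicies, getPolicybyId) with list/dict-returning variants (listAllPolicyIds, listPolicyNamebyId, listPolicyStatusbyId). A minimal sketch of how the function-use variants compose into a status report, assuming the module is importable as `policies` and that the placeholder credentials are valid for the JSS:

import policies  # hypothetical import name for the module in this record

username, password = 'apiuser', 'secret'  # placeholder credentials

# listAllPolicyIds returns None on request failure, hence the `or []` guard.
for policy_id in (policies.listAllPolicyIds(username, password) or []):
    name = policies.listPolicyNamebyId(policy_id, username, password)
    status = policies.listPolicyStatusbyId(policy_id, username, password)
    print('Policy %s (%s) enabled: %s' % (policy_id, name, status))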
790c138a3e179991434973e9c0575166d21d0d4c
| 202
|
py
|
Python
|
Programming Languages & Libraries/Python/Python Complete Bootcamp/__name__ and __main__/one.py
|
ttotoc/codebook
|
2085e2e29cad9510ba9017e0a760cd0d2d4a734e
|
[
"MIT"
] | null | null | null |
Programming Languages & Libraries/Python/Python Complete Bootcamp/__name__ and __main__/one.py
|
ttotoc/codebook
|
2085e2e29cad9510ba9017e0a760cd0d2d4a734e
|
[
"MIT"
] | null | null | null |
Programming Languages & Libraries/Python/Python Complete Bootcamp/__name__ and __main__/one.py
|
ttotoc/codebook
|
2085e2e29cad9510ba9017e0a760cd0d2d4a734e
|
[
"MIT"
] | null | null | null |
#one.py
print('hello')
def func():
print("Func() in one.py")
print("TOP LEVEL IN one.py")
if __name__ == "__main__":
print("one.py is being run directly!")
else:
print("one.py has been imported!")
| 16.833333
| 39
| 0.658416
|
print('hello')
def func():
print("Func() in one.py")
print("TOP LEVEL IN one.py")
if __name__ == "__main__":
print("one.py is being run directly!")
else:
print("one.py has been imported!")
| true
| true
|
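one.py above only shows its import-time behaviour when paired with a second module; a hypothetical two.py (not part of this record) that exercises the else branch:

# two.py -- hypothetical companion file, not part of the dataset record
import one   # prints "hello", "TOP LEVEL IN one.py", then "one.py has been imported!"

one.func()   # prints "Func() in one.py"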
790c13d0abc3672c73d694754c277c3acb883e8b
| 14,299
|
py
|
Python
|
corehq/apps/appstore/views.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/appstore/views.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
corehq/apps/appstore/views.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from urllib import urlencode
from corehq.apps.registration.utils import create_30_day_trial
from dimagi.utils.couch import CriticalSection
from dimagi.utils.couch.resource_conflict import retry_resource
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template.loader import render_to_string
from django.shortcuts import render
from django.contrib import messages
from dimagi.utils.name_to_url import name_to_url
from django.utils.translation import ugettext as _, ugettext_lazy
from corehq.apps.app_manager.views.apps import clear_app_cache
from corehq.apps.domain.decorators import require_superuser
from corehq.apps.domain.exceptions import NameUnavailableException
from corehq.elastic import es_query, parse_args_for_es, fill_mapping_with_facets
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.database import apply_update
from corehq.apps.fixtures.models import FixtureDataType
SNAPSHOT_FACETS = ['project_type', 'license', 'author.exact', 'is_starter_app']
DEPLOYMENT_FACETS = ['deployment.region']
SNAPSHOT_MAPPING = [
("", True, [
{"facet": "project_type", "name": ugettext_lazy("Category"), "expanded": True},
{
"facet": "license",
"name": ugettext_lazy("License"),
"expanded": True,
"mapping": {
'cc': 'CC BY',
'cc-sa': 'CC BY-SA',
'cc-nd': 'CC BY-ND',
'cc-nc': 'CC BY-NC',
'cc-nc-sa': 'CC BY-NC-SA',
'cc-nc-nd': 'CC BY-NC-ND',
}
},
{"facet": "author.exact", "name": ugettext_lazy("Author"), "expanded": True},
]),
]
DEPLOYMENT_MAPPING = [
("", True, [
{"facet": "deployment.region", "name": "Region", "expanded": True},
]),
]
def rewrite_url(request, path):
return HttpResponseRedirect('/exchange%s?%s' % (path, request.META['QUERY_STRING']))
def inverse_dict(d):
return dict([(v, k) for k, v in d.iteritems()])
def can_view_app(req, dom):
if not dom or not dom.is_snapshot or not dom.published:
return False
if not dom.is_approved and (
not getattr(req, "couch_user", "") or not req.couch_user.is_domain_admin(dom.copied_from.name)
):
return False
return True
def project_info(request, domain, template="appstore/project_info.html"):
dom = Domain.get(domain)
if not can_view_app(request, dom):
raise Http404()
copies = dom.copies_of_parent()
images = set()
audio = set()
return render(request, template, {
"project": dom,
"applications": dom.full_applications(include_builds=False),
"fixtures": FixtureDataType.by_domain(dom.name),
"copies": copies,
"images": images,
"audio": audio,
"url_base": reverse('appstore'),
'display_import': True if getattr(request, "couch_user",
"") and request.couch_user.get_domains() else False
})
def deduplicate(hits):
unique_names = set()
unique_hits = []
for hit in hits:
if not hit['_source']['name'] in unique_names:
unique_hits.append(hit)
unique_names.add(hit['_source']['name'])
return unique_hits
def appstore(request, template="appstore/appstore_base.html"):
page_length = 10
include_unapproved = True if request.GET.get('is_approved', "") == "false" else False
if include_unapproved and not request.user.is_superuser:
raise Http404()
params, _ = parse_args_for_es(request)
page = params.pop('page', 1)
page = int(page[0] if isinstance(page, list) else page)
results = es_snapshot_query(params, SNAPSHOT_FACETS)
hits = results.get('hits', {}).get('hits', [])
hits = deduplicate(hits)
d_results = [Domain.wrap(res['_source']) for res in hits]
starter_apps = request.GET.get('is_starter_app', None)
sort_by = request.GET.get('sort_by', None)
if sort_by == 'newest':
pass
else:
d_results = Domain.hit_sort(d_results)
persistent_params = {}
if sort_by:
persistent_params["sort_by"] = sort_by
if include_unapproved:
persistent_params["is_approved"] = "false"
persistent_params = urlencode(persistent_params) # json.dumps(persistent_params)
more_pages = False if len(d_results) <= page * page_length else True
facet_map = fill_mapping_with_facets(SNAPSHOT_MAPPING, results, params)
vals = dict(
apps=d_results[(page - 1) * page_length:page * page_length],
page=page,
prev_page=(page - 1),
next_page=(page + 1),
more_pages=more_pages,
sort_by=sort_by,
show_starter_apps=starter_apps,
include_unapproved=include_unapproved,
facet_map=facet_map,
facets=results.get("facets", []),
query_str=request.META['QUERY_STRING'],
search_query=params.get('search', [""])[0],
persistent_params=persistent_params,
)
return render(request, template, vals)
def appstore_api(request):
params, facets = parse_args_for_es(request)
results = es_snapshot_query(params, facets)
return HttpResponse(json.dumps(results), content_type="application/json")
def es_snapshot_query(params, facets=None, terms=None, sort_by="snapshot_time"):
if terms is None:
terms = ['is_approved', 'sort_by', 'search']
if facets is None:
facets = []
q = {"sort": {sort_by: {"order": "desc"}},
"query": {"bool": {"must": [
{"match": {'doc_type': "Domain"}},
{"term": {"published": True}},
{"term": {"is_snapshot": True}}
]}},
"filter": {"and": [{"term": {"is_approved": params.get('is_approved', None) or True}}]}}
search_query = params.get('search', "")
if search_query:
q['query']['bool']['must'].append({
"match": {
"_all": {
"query": search_query,
"operator": "and"
}
}
})
return es_query(params, facets, terms, q)
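# Illustrative shape of q as built above (comment only, nothing executed):
# - sort: newest snapshot_time first
# - query.bool.must: doc_type == "Domain", published == True, is_snapshot == True,
#   plus an "_all" match when a ?search= parameter is given
# - filter: {"and": [{"term": {"is_approved": <param or True>}}]}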
@require_superuser
def approve_app(request, domain):
domain = Domain.get(domain)
if request.GET.get('approve') == 'true':
domain.is_approved = True
domain.save()
elif request.GET.get('approve') == 'false':
domain.is_approved = False
domain.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER') or reverse('appstore'))
@login_required
@retry_resource(3)
def import_app(request, domain):
user = request.couch_user
if not user.is_eula_signed():
messages.error(request, 'You must agree to our eula to download an app')
return project_info(request, domain)
from_project = Domain.get(domain)
if request.method == 'POST' and from_project.is_snapshot:
if not from_project.published:
messages.error(request, "This project is not published and can't be downloaded")
return project_info(request, domain)
to_project_name = request.POST['project']
if not user.is_member_of(to_project_name):
messages.error(request, _("You don't belong to that project"))
return project_info(request, domain)
full_apps = from_project.full_applications(include_builds=False)
assert full_apps, 'Bad attempt to copy apps from a project without any!'
for app in full_apps:
new_doc = from_project.copy_component(app['doc_type'], app.get_id, to_project_name, user)
clear_app_cache(request, to_project_name)
from_project.downloads += 1
from_project.save()
messages.success(request, render_to_string("appstore/partials/view_wiki.html",
{"pre": _("Application successfully imported!")}),
extra_tags="html")
return HttpResponseRedirect(reverse('view_app', args=[to_project_name, new_doc.id]))
else:
return HttpResponseRedirect(reverse('project_info', args=[domain]))
@login_required
def copy_snapshot(request, domain):
user = request.couch_user
if not user.is_eula_signed():
messages.error(request, 'You must agree to our eula to download an app')
return project_info(request, domain)
dom = Domain.get(domain)
if request.method == "POST" and dom.is_snapshot:
assert dom.full_applications(include_builds=False), 'Bad attempt to copy project without any apps!'
from corehq.apps.registration.forms import DomainRegistrationForm
args = {
'domain_name': request.POST['new_project_name'],
'hr_name': request.POST['new_project_name'],
'eula_confirmed': True,
}
form = DomainRegistrationForm(args)
if request.POST.get('new_project_name', ""):
if not dom.published:
messages.error(request, _("This project is not published and can't be downloaded"))
return project_info(request, domain)
if not form.is_valid():
messages.error(request, form.errors)
return project_info(request, domain)
new_domain_name = name_to_url(form.cleaned_data['hr_name'], "project")
with CriticalSection(['copy_domain_snapshot_{}_to_{}'.format(dom.name, new_domain_name)]):
try:
new_domain = dom.save_copy(new_domain_name,
new_hr_name=form.cleaned_data['hr_name'],
user=user)
except NameUnavailableException:
messages.error(request, _("A project by that name already exists"))
return project_info(request, domain)
# sign new project up for trial
create_30_day_trial(new_domain)
def inc_downloads(d):
d.downloads += 1
apply_update(dom, inc_downloads)
messages.success(request, render_to_string("appstore/partials/view_wiki.html",
{"pre": _("Project copied successfully!")}),
extra_tags="html")
return HttpResponseRedirect(reverse('view_app',
args=[new_domain.name, new_domain.full_applications()[0].get_id]))
else:
messages.error(request, _("You must specify a name for the new project"))
return project_info(request, domain)
else:
return HttpResponseRedirect(reverse('project_info', args=[domain]))
def project_image(request, domain):
project = Domain.get(domain)
if project.image_path:
image = project.fetch_attachment(project.image_path)
return HttpResponse(image, content_type=project.image_type)
else:
raise Http404()
def project_documentation_file(request, domain):
project = Domain.get(domain)
if project.documentation_file_path:
documentation_file = project.fetch_attachment(project.documentation_file_path)
return HttpResponse(documentation_file, content_type=project.documentation_file_type)
else:
raise Http404()
@login_required
def deployment_info(request, domain, template="appstore/deployment_info.html"):
dom = Domain.get_by_name(domain)
if not dom or not dom.deployment.public:
raise Http404()
# get facets
results = es_deployments_query({}, DEPLOYMENT_FACETS)
facet_map = fill_mapping_with_facets(DEPLOYMENT_MAPPING, results, {})
return render(request, template, {
'domain': dom,
'search_url': reverse('deployments'),
'url_base': reverse('deployments'),
'facet_map': facet_map,
})
@login_required
def deployments(request, template="appstore/deployments.html"):
params, _ = parse_args_for_es(request)
params = dict([(DEPLOYMENT_MAPPING.get(p, p), params[p]) for p in params])
page = int(params.pop('page', 1))
results = es_deployments_query(params, DEPLOYMENT_FACETS)
d_results = [Domain.wrap(res['_source']) for res in results['hits']['hits']]
more_pages = False if len(d_results) <= page * 10 else True
facet_map = fill_mapping_with_facets(DEPLOYMENT_MAPPING, results, params)
include_unapproved = True if request.GET.get('is_approved', "") == "false" else False
vals = {'deployments': d_results[(page - 1) * 10:page * 10],
'page': page,
'prev_page': page - 1,
'next_page': (page + 1),
'more_pages': more_pages,
'include_unapproved': include_unapproved,
'facet_map': facet_map,
'query_str': request.META['QUERY_STRING'],
'search_url': reverse('deployments'),
'search_query': params.get('search', [""])[0]}
return render(request, template, vals)
def deployments_api(request):
params, facets = parse_args_for_es(request)
params = dict([(DEPLOYMENT_MAPPING.get(p, p), params[p]) for p in params])
results = es_deployments_query(params, facets)
return HttpResponse(json.dumps(results), content_type="application/json")
def es_deployments_query(params, facets=None, terms=None, sort_by="snapshot_time"):
if terms is None:
terms = ['is_approved', 'sort_by', 'search']
if facets is None:
facets = []
q = {"query": {"bool": {"must": [{"match": {'doc_type': "Domain"}},
{"term": {"deployment.public": True}}]}}}
search_query = params.get('search', "")
if search_query:
q['query']['bool']['must'].append({
"match": {
"_all": {
"query": search_query,
"operator": "and"
}
}
})
return es_query(params, facets, terms, q)
def media_files(request, domain, template="appstore/media_files.html"):
dom = Domain.get(domain)
if not can_view_app(request, dom):
raise Http404()
return render(request, template, {
"project": dom,
"url_base": reverse('appstore')
})
| 37.044041
| 114
| 0.628575
|
import json
from urllib import urlencode
from corehq.apps.registration.utils import create_30_day_trial
from dimagi.utils.couch import CriticalSection
from dimagi.utils.couch.resource_conflict import retry_resource
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template.loader import render_to_string
from django.shortcuts import render
from django.contrib import messages
from dimagi.utils.name_to_url import name_to_url
from django.utils.translation import ugettext as _, ugettext_lazy
from corehq.apps.app_manager.views.apps import clear_app_cache
from corehq.apps.domain.decorators import require_superuser
from corehq.apps.domain.exceptions import NameUnavailableException
from corehq.elastic import es_query, parse_args_for_es, fill_mapping_with_facets
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.database import apply_update
from corehq.apps.fixtures.models import FixtureDataType
SNAPSHOT_FACETS = ['project_type', 'license', 'author.exact', 'is_starter_app']
DEPLOYMENT_FACETS = ['deployment.region']
SNAPSHOT_MAPPING = [
("", True, [
{"facet": "project_type", "name": ugettext_lazy("Category"), "expanded": True},
{
"facet": "license",
"name": ugettext_lazy("License"),
"expanded": True,
"mapping": {
'cc': 'CC BY',
'cc-sa': 'CC BY-SA',
'cc-nd': 'CC BY-ND',
'cc-nc': 'CC BY-NC',
'cc-nc-sa': 'CC BY-NC-SA',
'cc-nc-nd': 'CC BY-NC-ND',
}
},
{"facet": "author.exact", "name": ugettext_lazy("Author"), "expanded": True},
]),
]
DEPLOYMENT_MAPPING = [
("", True, [
{"facet": "deployment.region", "name": "Region", "expanded": True},
]),
]
def rewrite_url(request, path):
return HttpResponseRedirect('/exchange%s?%s' % (path, request.META['QUERY_STRING']))
def inverse_dict(d):
return dict([(v, k) for k, v in d.iteritems()])
def can_view_app(req, dom):
if not dom or not dom.is_snapshot or not dom.published:
return False
if not dom.is_approved and (
not getattr(req, "couch_user", "") or not req.couch_user.is_domain_admin(dom.copied_from.name)
):
return False
return True
def project_info(request, domain, template="appstore/project_info.html"):
dom = Domain.get(domain)
if not can_view_app(request, dom):
raise Http404()
copies = dom.copies_of_parent()
images = set()
audio = set()
return render(request, template, {
"project": dom,
"applications": dom.full_applications(include_builds=False),
"fixtures": FixtureDataType.by_domain(dom.name),
"copies": copies,
"images": images,
"audio": audio,
"url_base": reverse('appstore'),
'display_import': True if getattr(request, "couch_user",
"") and request.couch_user.get_domains() else False
})
def deduplicate(hits):
unique_names = set()
unique_hits = []
for hit in hits:
if not hit['_source']['name'] in unique_names:
unique_hits.append(hit)
unique_names.add(hit['_source']['name'])
return unique_hits
def appstore(request, template="appstore/appstore_base.html"):
page_length = 10
include_unapproved = True if request.GET.get('is_approved', "") == "false" else False
if include_unapproved and not request.user.is_superuser:
raise Http404()
params, _ = parse_args_for_es(request)
page = params.pop('page', 1)
page = int(page[0] if isinstance(page, list) else page)
results = es_snapshot_query(params, SNAPSHOT_FACETS)
hits = results.get('hits', {}).get('hits', [])
hits = deduplicate(hits)
d_results = [Domain.wrap(res['_source']) for res in hits]
starter_apps = request.GET.get('is_starter_app', None)
sort_by = request.GET.get('sort_by', None)
if sort_by == 'newest':
pass
else:
d_results = Domain.hit_sort(d_results)
persistent_params = {}
if sort_by:
persistent_params["sort_by"] = sort_by
if include_unapproved:
persistent_params["is_approved"] = "false"
persistent_params = urlencode(persistent_params)
more_pages = False if len(d_results) <= page * page_length else True
facet_map = fill_mapping_with_facets(SNAPSHOT_MAPPING, results, params)
vals = dict(
apps=d_results[(page - 1) * page_length:page * page_length],
page=page,
prev_page=(page - 1),
next_page=(page + 1),
more_pages=more_pages,
sort_by=sort_by,
show_starter_apps=starter_apps,
include_unapproved=include_unapproved,
facet_map=facet_map,
facets=results.get("facets", []),
query_str=request.META['QUERY_STRING'],
search_query=params.get('search', [""])[0],
persistent_params=persistent_params,
)
return render(request, template, vals)
def appstore_api(request):
params, facets = parse_args_for_es(request)
results = es_snapshot_query(params, facets)
return HttpResponse(json.dumps(results), content_type="application/json")
def es_snapshot_query(params, facets=None, terms=None, sort_by="snapshot_time"):
if terms is None:
terms = ['is_approved', 'sort_by', 'search']
if facets is None:
facets = []
q = {"sort": {sort_by: {"order": "desc"}},
"query": {"bool": {"must": [
{"match": {'doc_type': "Domain"}},
{"term": {"published": True}},
{"term": {"is_snapshot": True}}
]}},
"filter": {"and": [{"term": {"is_approved": params.get('is_approved', None) or True}}]}}
search_query = params.get('search', "")
if search_query:
q['query']['bool']['must'].append({
"match": {
"_all": {
"query": search_query,
"operator": "and"
}
}
})
return es_query(params, facets, terms, q)
@require_superuser
def approve_app(request, domain):
domain = Domain.get(domain)
if request.GET.get('approve') == 'true':
domain.is_approved = True
domain.save()
elif request.GET.get('approve') == 'false':
domain.is_approved = False
domain.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER') or reverse('appstore'))
@login_required
@retry_resource(3)
def import_app(request, domain):
user = request.couch_user
if not user.is_eula_signed():
messages.error(request, 'You must agree to our eula to download an app')
return project_info(request, domain)
from_project = Domain.get(domain)
if request.method == 'POST' and from_project.is_snapshot:
if not from_project.published:
messages.error(request, "This project is not published and can't be downloaded")
return project_info(request, domain)
to_project_name = request.POST['project']
if not user.is_member_of(to_project_name):
messages.error(request, _("You don't belong to that project"))
return project_info(request, domain)
full_apps = from_project.full_applications(include_builds=False)
assert full_apps, 'Bad attempt to copy apps from a project without any!'
for app in full_apps:
new_doc = from_project.copy_component(app['doc_type'], app.get_id, to_project_name, user)
clear_app_cache(request, to_project_name)
from_project.downloads += 1
from_project.save()
messages.success(request, render_to_string("appstore/partials/view_wiki.html",
{"pre": _("Application successfully imported!")}),
extra_tags="html")
return HttpResponseRedirect(reverse('view_app', args=[to_project_name, new_doc.id]))
else:
return HttpResponseRedirect(reverse('project_info', args=[domain]))
@login_required
def copy_snapshot(request, domain):
user = request.couch_user
if not user.is_eula_signed():
messages.error(request, 'You must agree to our eula to download an app')
return project_info(request, domain)
dom = Domain.get(domain)
if request.method == "POST" and dom.is_snapshot:
assert dom.full_applications(include_builds=False), 'Bad attempt to copy project without any apps!'
from corehq.apps.registration.forms import DomainRegistrationForm
args = {
'domain_name': request.POST['new_project_name'],
'hr_name': request.POST['new_project_name'],
'eula_confirmed': True,
}
form = DomainRegistrationForm(args)
if request.POST.get('new_project_name', ""):
if not dom.published:
messages.error(request, _("This project is not published and can't be downloaded"))
return project_info(request, domain)
if not form.is_valid():
messages.error(request, form.errors)
return project_info(request, domain)
new_domain_name = name_to_url(form.cleaned_data['hr_name'], "project")
with CriticalSection(['copy_domain_snapshot_{}_to_{}'.format(dom.name, new_domain_name)]):
try:
new_domain = dom.save_copy(new_domain_name,
new_hr_name=form.cleaned_data['hr_name'],
user=user)
except NameUnavailableException:
messages.error(request, _("A project by that name already exists"))
return project_info(request, domain)
# sign new project up for trial
create_30_day_trial(new_domain)
def inc_downloads(d):
d.downloads += 1
apply_update(dom, inc_downloads)
messages.success(request, render_to_string("appstore/partials/view_wiki.html",
{"pre": _("Project copied successfully!")}),
extra_tags="html")
return HttpResponseRedirect(reverse('view_app',
args=[new_domain.name, new_domain.full_applications()[0].get_id]))
else:
messages.error(request, _("You must specify a name for the new project"))
return project_info(request, domain)
else:
return HttpResponseRedirect(reverse('project_info', args=[domain]))
def project_image(request, domain):
project = Domain.get(domain)
if project.image_path:
image = project.fetch_attachment(project.image_path)
return HttpResponse(image, content_type=project.image_type)
else:
raise Http404()
def project_documentation_file(request, domain):
project = Domain.get(domain)
if project.documentation_file_path:
documentation_file = project.fetch_attachment(project.documentation_file_path)
return HttpResponse(documentation_file, content_type=project.documentation_file_type)
else:
raise Http404()
@login_required
def deployment_info(request, domain, template="appstore/deployment_info.html"):
dom = Domain.get_by_name(domain)
if not dom or not dom.deployment.public:
raise Http404()
# get facets
results = es_deployments_query({}, DEPLOYMENT_FACETS)
facet_map = fill_mapping_with_facets(DEPLOYMENT_MAPPING, results, {})
return render(request, template, {
'domain': dom,
'search_url': reverse('deployments'),
'url_base': reverse('deployments'),
'facet_map': facet_map,
})
@login_required
def deployments(request, template="appstore/deployments.html"):
params, _ = parse_args_for_es(request)
params = dict([(DEPLOYMENT_MAPPING.get(p, p), params[p]) for p in params])
page = int(params.pop('page', 1))
results = es_deployments_query(params, DEPLOYMENT_FACETS)
d_results = [Domain.wrap(res['_source']) for res in results['hits']['hits']]
more_pages = False if len(d_results) <= page * 10 else True
facet_map = fill_mapping_with_facets(DEPLOYMENT_MAPPING, results, params)
include_unapproved = True if request.GET.get('is_approved', "") == "false" else False
vals = {'deployments': d_results[(page - 1) * 10:page * 10],
'page': page,
'prev_page': page - 1,
'next_page': (page + 1),
'more_pages': more_pages,
'include_unapproved': include_unapproved,
'facet_map': facet_map,
'query_str': request.META['QUERY_STRING'],
'search_url': reverse('deployments'),
'search_query': params.get('search', [""])[0]}
return render(request, template, vals)
def deployments_api(request):
params, facets = parse_args_for_es(request)
params = dict([(DEPLOYMENT_MAPPING.get(p, p), params[p]) for p in params])
results = es_deployments_query(params, facets)
return HttpResponse(json.dumps(results), content_type="application/json")
def es_deployments_query(params, facets=None, terms=None, sort_by="snapshot_time"):
if terms is None:
terms = ['is_approved', 'sort_by', 'search']
if facets is None:
facets = []
q = {"query": {"bool": {"must": [{"match": {'doc_type': "Domain"}},
{"term": {"deployment.public": True}}]}}}
search_query = params.get('search', "")
if search_query:
q['query']['bool']['must'].append({
"match": {
"_all": {
"query": search_query,
"operator": "and"
}
}
})
return es_query(params, facets, terms, q)
def media_files(request, domain, template="appstore/media_files.html"):
dom = Domain.get(domain)
if not can_view_app(request, dom):
raise Http404()
return render(request, template, {
"project": dom,
"url_base": reverse('appstore')
})
| true
| true
|
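deduplicate() in the record above keeps only the first ES hit per snapshot name; a standalone check (the helper is repeated verbatim so the snippet runs on its own):

def deduplicate(hits):
    unique_names = set()
    unique_hits = []
    for hit in hits:
        if not hit['_source']['name'] in unique_names:
            unique_hits.append(hit)
            unique_names.add(hit['_source']['name'])
    return unique_hits

hits = [{'_source': {'name': 'crops'}},
        {'_source': {'name': 'health'}},
        {'_source': {'name': 'crops'}}]   # duplicate snapshot name
assert [h['_source']['name'] for h in deduplicate(hits)] == ['crops', 'health']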
790c15b2e9cbcc86c252142307b53d3b174d9891
| 892
|
py
|
Python
|
tests/test_elementwise_mul_op.py
|
winston-zillow/paddle2onnx
|
65b9b8a628f6dc65109c30106e58174fcaa5845b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_elementwise_mul_op.py
|
winston-zillow/paddle2onnx
|
65b9b8a628f6dc65109c30106e58174fcaa5845b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_elementwise_mul_op.py
|
winston-zillow/paddle2onnx
|
65b9b8a628f6dc65109c30106e58174fcaa5845b
|
[
"Apache-2.0"
] | 2
|
2021-01-19T03:54:14.000Z
|
2021-11-17T00:36:05.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from test_elementwise_add_op import TestElementwiseAddOp
class TestElementwiseMulOp(TestElementwiseAddOp):
def init(self):
self.op_type = 'elementwise_mul'
if __name__ == '__main__':
unittest.main()
| 31.857143
| 74
| 0.765695
|
import unittest
import numpy as np
from op_test import OpTest
from test_elementwise_add_op import TestElementwiseAddOp
class TestElementwiseMulOp(TestElementwiseAddOp):
def init(self):
self.op_type = 'elementwise_mul'
if __name__ == '__main__':
unittest.main()
| true
| true
|
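The test above inherits the whole add-op harness and only overrides op_type; a rough numpy-only illustration of the elementwise product the operator is expected to produce, independent of the paddle test framework:

import numpy as np

x = np.random.rand(2, 3).astype('float32')
y = np.random.rand(2, 3).astype('float32')
out = np.multiply(x, y)           # reference result for elementwise_mul
assert np.allclose(out, x * y)    # operator form matches np.multiply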
790c172e93f822e27eb77e8e022b03ffccc87b9a
| 250,936
|
py
|
Python
|
gluon/tools.py
|
kvk3008/project
|
8da5808a6bc8dcab74ca25c3d80c7c3dcd935883
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/tools.py
|
kvk3008/project
|
8da5808a6bc8dcab74ca25c3d80c7c3dcd935883
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/tools.py
|
kvk3008/project
|
8da5808a6bc8dcab74ca25c3d80c7c3dcd935883
|
[
"BSD-3-Clause"
] | null | null | null |
#!/bin/python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Auth, Mail, PluginManager and various utilities
------------------------------------------------
"""
import base64
try:
import cPickle as pickle
except:
import pickle
import datetime
import thread
import logging
import sys
import glob
import os
import re
import time
import traceback
import smtplib
import urllib
import urllib2
import Cookie
import cStringIO
import ConfigParser
import email.utils
import random
from email import MIMEBase, MIMEMultipart, MIMEText, Encoders, Header, message_from_string, Charset
from gluon.contenttype import contenttype
from gluon.storage import Storage, StorageList, Settings, Messages
from gluon.utils import web2py_uuid
from gluon.fileutils import read_file, check_credentials
from gluon import *
from gluon.contrib.autolinks import expand_one
from gluon.contrib.markmin.markmin2html import \
replace_at_urls, replace_autolinks, replace_components
from pydal.objects import Row, Set, Query
import gluon.serializers as serializers
Table = DAL.Table
Field = DAL.Field
try:
# try stdlib (Python 2.6)
import json as json_parser
except ImportError:
try:
# try external module
import simplejson as json_parser
except:
# fallback to pure-Python module
import gluon.contrib.simplejson as json_parser
__all__ = ['Mail', 'Auth', 'Recaptcha', 'Crud', 'Service', 'Wiki',
'PluginManager', 'fetch', 'geocode', 'reverse_geocode', 'prettydate']
### mind there are two loggers here (logger and crud.settings.logger)!
logger = logging.getLogger("web2py")
DEFAULT = lambda: None
def getarg(position, default=None):
args = current.request.args
if position < 0 and len(args) >= -position:
return args[position]
elif position >= 0 and len(args) > position:
return args[position]
else:
return default
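# Illustrative only: with current.request.args == ['edit', '42'],
# getarg(1) -> '42', getarg(-1) -> '42', and getarg(5, 'none') -> 'none'.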
def callback(actions, form, tablename=None):
if actions:
if tablename and isinstance(actions, dict):
actions = actions.get(tablename, [])
if not isinstance(actions, (list, tuple)):
actions = [actions]
[action(form) for action in actions]
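# Illustrative only: callback([f, g], form) runs f(form) then g(form);
# callback({'person': [f]}, form, 'person') first picks the per-table list.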
def validators(*a):
b = []
for item in a:
if isinstance(item, (list, tuple)):
b = b + list(item)
else:
b.append(item)
return b
def call_or_redirect(f, *args):
if callable(f):
redirect(f(*args))
else:
redirect(f)
def replace_id(url, form):
if url:
url = url.replace('[id]', str(form.vars.id))
if url[0] == '/' or url[:4] == 'http':
return url
return URL(url)
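# Illustrative only: with form.vars.id == 7, replace_id('/item/[id]', form)
# returns '/item/7' unchanged, while a bare name like 'index' is wrapped
# with URL('index').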
class Mail(object):
"""
Class for configuring and sending emails with alternative text / html
body, multiple attachments and encryption support
Works with SMTP and Google App Engine.
Args:
server: SMTP server address in address:port notation
sender: sender email address
login: sender login name and password in login:password notation
or None if no authentication is required
tls: enables/disables encryption (True by default)
In Google App Engine use ::
server='gae'
    For the sake of backward compatibility all fields are optional and default
    to None; however, to be able to send emails, at least server and sender
    must be specified. They are available under the following fields::
mail.settings.server
mail.settings.sender
mail.settings.login
mail.settings.timeout = 60 # seconds (default)
When server is 'logging', email is logged but not sent (debug mode)
Optionally you can use PGP encryption or X509::
mail.settings.cipher_type = None
mail.settings.gpg_home = None
mail.settings.sign = True
mail.settings.sign_passphrase = None
mail.settings.encrypt = True
mail.settings.x509_sign_keyfile = None
mail.settings.x509_sign_certfile = None
mail.settings.x509_sign_chainfile = None
mail.settings.x509_nocerts = False
mail.settings.x509_crypt_certfiles = None
cipher_type : None
gpg - need a python-pyme package and gpgme lib
x509 - smime
gpg_home : you can set a GNUPGHOME environment variable
to specify home of gnupg
sign : sign the message (True or False)
sign_passphrase : passphrase for key signing
encrypt : encrypt the message (True or False). It defaults
to True
... x509 only ...
x509_sign_keyfile : the signers private key filename or
string containing the key. (PEM format)
x509_sign_certfile: the signers certificate filename or
string containing the cert. (PEM format)
x509_sign_chainfile: sets the optional all-in-one file where you
can assemble the certificates of Certification
Authorities (CA) which form the certificate
chain of email certificate. It can be a
string containing the certs to. (PEM format)
x509_nocerts : if True then no attached certificate in mail
x509_crypt_certfiles: the certificates file or strings to encrypt
the messages with can be a file name /
string or a list of file names /
strings (PEM format)
Examples:
Create Mail object with authentication data for remote server::
mail = Mail('example.com:25', 'me@example.com', 'me:password')
    Notice for GAE users:
        attachments have an automatic content_id='attachment-i', where i is a
        progressive number; this way they can be referenced from the HTML as
        <img src="cid:attachment-0" /> etc.
"""
class Attachment(MIMEBase.MIMEBase):
"""
Email attachment
Args:
payload: path to file or file-like object with read() method
filename: name of the attachment stored in message; if set to
None, it will be fetched from payload path; file-like
object payload must have explicit filename specified
content_id: id of the attachment; automatically contained within
`<` and `>`
content_type: content type of the attachment; if set to None,
it will be fetched from filename using gluon.contenttype
module
encoding: encoding of all strings passed to this function (except
attachment body)
Content ID is used to identify attachments within the html body;
        for example, an attached image with content ID 'photo' may be used in
        the html message as the source of an img tag `<img src="cid:photo" />`.
Example::
Create attachment from text file::
attachment = Mail.Attachment('/path/to/file.txt')
Content-Type: text/plain
MIME-Version: 1.0
Content-Disposition: attachment; filename="file.txt"
Content-Transfer-Encoding: base64
SOMEBASE64CONTENT=
Create attachment from image file with custom filename and cid::
attachment = Mail.Attachment('/path/to/file.png',
filename='photo.png',
content_id='photo')
Content-Type: image/png
MIME-Version: 1.0
Content-Disposition: attachment; filename="photo.png"
Content-Id: <photo>
Content-Transfer-Encoding: base64
SOMEOTHERBASE64CONTENT=
"""
def __init__(
self,
payload,
filename=None,
content_id=None,
content_type=None,
encoding='utf-8'):
if isinstance(payload, str):
if filename is None:
filename = os.path.basename(payload)
payload = read_file(payload, 'rb')
else:
if filename is None:
raise Exception('Missing attachment name')
payload = payload.read()
filename = filename.encode(encoding)
if content_type is None:
content_type = contenttype(filename)
self.my_filename = filename
self.my_payload = payload
MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1))
self.set_payload(payload)
self['Content-Disposition'] = 'attachment; filename="%s"' % filename
if not content_id is None:
self['Content-Id'] = '<%s>' % content_id.encode(encoding)
Encoders.encode_base64(self)
def __init__(self, server=None, sender=None, login=None, tls=True):
settings = self.settings = Settings()
settings.server = server
settings.sender = sender
settings.login = login
settings.tls = tls
settings.timeout = 60 # seconds
settings.hostname = None
settings.ssl = False
settings.cipher_type = None
settings.gpg_home = None
settings.sign = True
settings.sign_passphrase = None
settings.encrypt = True
settings.x509_sign_keyfile = None
settings.x509_sign_certfile = None
settings.x509_sign_chainfile = None
settings.x509_nocerts = False
settings.x509_crypt_certfiles = None
settings.debug = False
settings.lock_keys = True
self.result = {}
self.error = None
def send(self,
to,
subject='[no subject]',
message='[no message]',
attachments=None,
cc=None,
bcc=None,
reply_to=None,
sender=None,
encoding='utf-8',
raw=False,
headers={},
from_address=None,
cipher_type=None,
sign=None,
sign_passphrase=None,
encrypt=None,
x509_sign_keyfile=None,
x509_sign_chainfile=None,
x509_sign_certfile=None,
x509_crypt_certfiles=None,
x509_nocerts=None
):
"""
Sends an email using data specified in constructor
Args:
to: list or tuple of receiver addresses; will also accept single
object
subject: subject of the email
message: email body text; depends on type of passed object:
- if 2-list or 2-tuple is passed: first element will be
source of plain text while second of html text;
- otherwise: object will be the only source of plain text
and html source will be set to None
If text or html source is:
- None: content part will be ignored,
- string: content part will be set to it,
- file-like object: content part will be fetched from it using
it's read() method
attachments: list or tuple of Mail.Attachment objects; will also
accept single object
cc: list or tuple of carbon copy receiver addresses; will also
accept single object
bcc: list or tuple of blind carbon copy receiver addresses; will
also accept single object
reply_to: address to which reply should be composed
encoding: encoding of all strings passed to this method (including
message bodies)
headers: dictionary of headers to refine the headers just before
sending mail, e.g. `{'X-Mailer' : 'web2py mailer'}`
from_address: address to appear in the 'From:' header, this is not
the envelope sender. If not specified the sender will be used
cipher_type :
gpg - need a python-pyme package and gpgme lib
x509 - smime
gpg_home : you can set a GNUPGHOME environment variable
to specify home of gnupg
sign : sign the message (True or False)
sign_passphrase : passphrase for key signing
encrypt : encrypt the message (True or False). It defaults to True.
... x509 only ...
x509_sign_keyfile : the signers private key filename or
string containing the key. (PEM format)
x509_sign_certfile: the signers certificate filename or
string containing the cert. (PEM format)
x509_sign_chainfile: sets the optional all-in-one file where you
can assemble the certificates of Certification
Authorities (CA) which form the certificate
chain of email certificate. It can be a
string containing the certs to. (PEM format)
x509_nocerts : if True then no attached certificate in mail
x509_crypt_certfiles: the certificates file or strings to encrypt
the messages with can be a file name / string or
a list of file names / strings (PEM format)
Examples:
Send plain text message to single address::
mail.send('you@example.com',
'Message subject',
'Plain text body of the message')
Send html message to single address::
mail.send('you@example.com',
'Message subject',
'<html>Plain text body of the message</html>')
Send text and html message to three addresses (two in cc)::
mail.send('you@example.com',
'Message subject',
('Plain text body', '<html>html body</html>'),
cc=['other1@example.com', 'other2@example.com'])
Send html only message with image attachment available from the
message by 'photo' content id::
mail.send('you@example.com',
'Message subject',
(None, '<html><img src="cid:photo" /></html>'),
                      Mail.Attachment('/path/to/photo.jpg',
content_id='photo'))
Send email with two attachments and no body text::
            mail.send('you@example.com',
'Message subject',
None,
                      [Mail.Attachment('/path/to/first.file'),
Mail.Attachment('/path/to/second.file')])
Returns:
True on success, False on failure.
Before return, method updates two object's fields:
- self.result: return value of smtplib.SMTP.sendmail() or GAE's
mail.send_mail() method
- self.error: Exception message or None if above was successful
"""
# We don't want to use base64 encoding for unicode mail
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
def encode_header(key):
if [c for c in key if 32 > ord(c) or ord(c) > 127]:
return Header.Header(key.encode('utf-8'), 'utf-8')
else:
return key
# encoded or raw text
def encoded_or_raw(text):
if raw:
text = encode_header(text)
return text
sender = sender or self.settings.sender
if not isinstance(self.settings.server, str):
raise Exception('Server address not specified')
if not isinstance(sender, str):
raise Exception('Sender address not specified')
if not raw and attachments:
# Use multipart/mixed if there is attachments
payload_in = MIMEMultipart.MIMEMultipart('mixed')
elif raw:
# no encoding configuration for raw messages
if not isinstance(message, basestring):
message = message.read()
if isinstance(message, unicode):
text = message.encode('utf-8')
elif not encoding == 'utf-8':
text = message.decode(encoding).encode('utf-8')
else:
text = message
# No charset passed to avoid transport encoding
# NOTE: some unicode encoded strings will produce
# unreadable mail contents.
payload_in = MIMEText.MIMEText(text)
if to:
if not isinstance(to, (list, tuple)):
to = [to]
else:
raise Exception('Target receiver address not specified')
if cc:
if not isinstance(cc, (list, tuple)):
cc = [cc]
if bcc:
if not isinstance(bcc, (list, tuple)):
bcc = [bcc]
if message is None:
text = html = None
elif isinstance(message, (list, tuple)):
text, html = message
elif message.strip().startswith('<html') and \
message.strip().endswith('</html>'):
text = self.settings.server == 'gae' and message or None
html = message
else:
text = message
html = None
if (not text is None or not html is None) and (not raw):
if not text is None:
if not isinstance(text, basestring):
text = text.read()
if isinstance(text, unicode):
text = text.encode('utf-8')
elif not encoding == 'utf-8':
text = text.decode(encoding).encode('utf-8')
if not html is None:
if not isinstance(html, basestring):
html = html.read()
if isinstance(html, unicode):
html = html.encode('utf-8')
elif not encoding == 'utf-8':
html = html.decode(encoding).encode('utf-8')
# Construct mime part only if needed
if text is not None and html:
# We have text and html we need multipart/alternative
attachment = MIMEMultipart.MIMEMultipart('alternative')
attachment.attach(MIMEText.MIMEText(text, _charset='utf-8'))
attachment.attach(
MIMEText.MIMEText(html, 'html', _charset='utf-8'))
elif text is not None:
attachment = MIMEText.MIMEText(text, _charset='utf-8')
elif html:
attachment = \
MIMEText.MIMEText(html, 'html', _charset='utf-8')
if attachments:
# If there is attachments put text and html into
# multipart/mixed
payload_in.attach(attachment)
else:
# No attachments no multipart/mixed
payload_in = attachment
if (attachments is None) or raw:
pass
elif isinstance(attachments, (list, tuple)):
for attachment in attachments:
payload_in.attach(attachment)
else:
payload_in.attach(attachments)
#######################################################
# CIPHER #
#######################################################
        cipher_type = cipher_type or self.settings.cipher_type
        sign = sign if sign is not None else self.settings.sign
        sign_passphrase = sign_passphrase or self.settings.sign_passphrase
        encrypt = encrypt if encrypt is not None else self.settings.encrypt
#######################################################
# GPGME #
#######################################################
if cipher_type == 'gpg':
if self.settings.gpg_home:
# Set GNUPGHOME environment variable to set home of gnupg
import os
os.environ['GNUPGHOME'] = self.settings.gpg_home
if not sign and not encrypt:
self.error = "No sign and no encrypt is set but cipher type to gpg"
return False
# need a python-pyme package and gpgme lib
from pyme import core, errors
from pyme.constants.sig import mode
############################################
# sign #
############################################
if sign:
                core.check_version(None)
                pin = payload_in.as_string().replace('\n', '\r\n')
plain = core.Data(pin)
sig = core.Data()
c = core.Context()
c.set_armor(1)
c.signers_clear()
# search for signing key for From:
for sigkey in c.op_keylist_all(sender, 1):
if sigkey.can_sign:
c.signers_add(sigkey)
if not c.signers_enum(0):
self.error = 'No key for signing [%s]' % sender
return False
c.set_passphrase_cb(lambda x, y, z: sign_passphrase)
try:
# make a signature
c.op_sign(plain, sig, mode.DETACH)
sig.seek(0, 0)
# make it part of the email
payload = MIMEMultipart.MIMEMultipart('signed',
boundary=None,
_subparts=None,
**dict(
micalg="pgp-sha1",
protocol="application/pgp-signature"))
# insert the origin payload
payload.attach(payload_in)
# insert the detached signature
p = MIMEBase.MIMEBase("application", 'pgp-signature')
p.set_payload(sig.read())
payload.attach(p)
# it's just a trick to handle the no encryption case
payload_in = payload
except errors.GPGMEError, ex:
self.error = "GPG error: %s" % ex.getstring()
return False
############################################
# encrypt #
############################################
if encrypt:
core.check_version(None)
plain = core.Data(payload_in.as_string())
cipher = core.Data()
c = core.Context()
c.set_armor(1)
# collect the public keys for encryption
recipients = []
rec = to[:]
if cc:
rec.extend(cc)
if bcc:
rec.extend(bcc)
for addr in rec:
c.op_keylist_start(addr, 0)
r = c.op_keylist_next()
if r is None:
self.error = 'No key for [%s]' % addr
return False
recipients.append(r)
try:
# make the encryption
c.op_encrypt(recipients, 1, plain, cipher)
cipher.seek(0, 0)
# make it a part of the email
payload = MIMEMultipart.MIMEMultipart('encrypted',
boundary=None,
_subparts=None,
**dict(protocol="application/pgp-encrypted"))
p = MIMEBase.MIMEBase("application", 'pgp-encrypted')
p.set_payload("Version: 1\r\n")
payload.attach(p)
p = MIMEBase.MIMEBase("application", 'octet-stream')
p.set_payload(cipher.read())
payload.attach(p)
except errors.GPGMEError, ex:
self.error = "GPG error: %s" % ex.getstring()
return False
#######################################################
# X.509 #
#######################################################
elif cipher_type == 'x509':
if not sign and not encrypt:
self.error = "No sign and no encrypt is set but cipher type to x509"
return False
import os
x509_sign_keyfile = x509_sign_keyfile or\
self.settings.x509_sign_keyfile
x509_sign_chainfile = x509_sign_chainfile or\
self.settings.x509_sign_chainfile
x509_sign_certfile = x509_sign_certfile or\
self.settings.x509_sign_certfile or\
x509_sign_keyfile or\
self.settings.x509_sign_certfile
# crypt certfiles could be a string or a list
x509_crypt_certfiles = x509_crypt_certfiles or\
self.settings.x509_crypt_certfiles
x509_nocerts = x509_nocerts or\
self.settings.x509_nocerts
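            # Illustrative only -- a sketch of the x509 (S/MIME) settings
            # resolved above; paths are placeholders, and each *file value
            # may instead hold the PEM content itself (see the
            # BIO.MemoryBuffer fallbacks below):
            #
            #     mail.settings.cipher_type = 'x509'
            #     mail.settings.x509_sign_keyfile = '/path/to/key.pem'
            #     mail.settings.x509_sign_certfile = '/path/to/cert.pem'
            #     mail.settings.x509_crypt_certfiles = ['/path/to/recipient.pem']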
# need m2crypto
try:
from M2Crypto import BIO, SMIME, X509
except Exception, e:
self.error = "Can't load M2Crypto module"
return False
msg_bio = BIO.MemoryBuffer(payload_in.as_string())
s = SMIME.SMIME()
# SIGN
if sign:
# key for signing
try:
keyfile_bio = BIO.openfile(x509_sign_keyfile)\
if os.path.isfile(x509_sign_keyfile)\
else BIO.MemoryBuffer(x509_sign_keyfile)
sign_certfile_bio = BIO.openfile(x509_sign_certfile)\
if os.path.isfile(x509_sign_certfile)\
else BIO.MemoryBuffer(x509_sign_certfile)
s.load_key_bio(keyfile_bio, sign_certfile_bio,
callback=lambda x: sign_passphrase)
if x509_sign_chainfile:
sk = X509.X509_Stack()
chain = X509.load_cert(x509_sign_chainfile)\
if os.path.isfile(x509_sign_chainfile)\
else X509.load_cert_string(x509_sign_chainfile)
sk.push(chain)
s.set_x509_stack(sk)
except Exception, e:
self.error = "Something went wrong on certificate / private key loading: <%s>" % str(e)
return False
try:
if x509_nocerts:
flags = SMIME.PKCS7_NOCERTS
else:
flags = 0
if not encrypt:
flags += SMIME.PKCS7_DETACHED
p7 = s.sign(msg_bio, flags=flags)
msg_bio = BIO.MemoryBuffer(payload_in.as_string(
)) # Recreate coz sign() has consumed it.
except Exception, e:
self.error = "Something went wrong on signing: <%s> %s" % (
str(e), str(flags))
return False
# ENCRYPT
if encrypt:
try:
sk = X509.X509_Stack()
if not isinstance(x509_crypt_certfiles, (list, tuple)):
x509_crypt_certfiles = [x509_crypt_certfiles]
# make an encryption cert's stack
for crypt_certfile in x509_crypt_certfiles:
certfile = X509.load_cert(crypt_certfile)\
if os.path.isfile(crypt_certfile)\
else X509.load_cert_string(crypt_certfile)
sk.push(certfile)
s.set_x509_stack(sk)
s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
tmp_bio = BIO.MemoryBuffer()
if sign:
s.write(tmp_bio, p7)
else:
tmp_bio.write(payload_in.as_string())
p7 = s.encrypt(tmp_bio)
except Exception, e:
self.error = "Something went wrong on encrypting: <%s>" % str(e)
return False
# Final stage in sign and encryption
out = BIO.MemoryBuffer()
if encrypt:
s.write(out, p7)
else:
if sign:
s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED)
else:
out.write('\r\n')
out.write(payload_in.as_string())
out.close()
st = str(out.read())
payload = message_from_string(st)
else:
# no cryptography process as usual
payload = payload_in
if from_address:
payload['From'] = encoded_or_raw(from_address.decode(encoding))
else:
payload['From'] = encoded_or_raw(sender.decode(encoding))
origTo = to[:]
if to:
payload['To'] = encoded_or_raw(', '.join(to).decode(encoding))
if reply_to:
payload['Reply-To'] = encoded_or_raw(reply_to.decode(encoding))
if cc:
payload['Cc'] = encoded_or_raw(', '.join(cc).decode(encoding))
to.extend(cc)
if bcc:
to.extend(bcc)
payload['Subject'] = encoded_or_raw(subject.decode(encoding))
payload['Date'] = email.utils.formatdate()
for k, v in headers.iteritems():
payload[k] = encoded_or_raw(v.decode(encoding))
result = {}
try:
if self.settings.server == 'logging':
logger.warn('email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' %
('-' * 40, sender,
', '.join(to), subject,
text or html, '-' * 40))
elif self.settings.server == 'gae':
xcc = dict()
if cc:
xcc['cc'] = cc
if bcc:
xcc['bcc'] = bcc
if reply_to:
xcc['reply_to'] = reply_to
from google.appengine.api import mail
attachments = attachments and [mail.Attachment(
a.my_filename,
a.my_payload,
                    content_id='<attachment-%s>' % k
) for k,a in enumerate(attachments) if not raw]
if attachments:
result = mail.send_mail(
sender=sender, to=origTo,
subject=subject, body=text, html=html,
attachments=attachments, **xcc)
elif html and (not raw):
result = mail.send_mail(
sender=sender, to=origTo,
subject=subject, body=text, html=html, **xcc)
else:
result = mail.send_mail(
sender=sender, to=origTo,
subject=subject, body=text, **xcc)
else:
smtp_args = self.settings.server.split(':')
kwargs = dict(timeout=self.settings.timeout)
if self.settings.ssl:
server = smtplib.SMTP_SSL(*smtp_args, **kwargs)
else:
server = smtplib.SMTP(*smtp_args, **kwargs)
if self.settings.tls and not self.settings.ssl:
server.ehlo(self.settings.hostname)
server.starttls()
server.ehlo(self.settings.hostname)
if self.settings.login:
server.login(*self.settings.login.split(':', 1))
result = server.sendmail(
sender, to, payload.as_string())
server.quit()
except Exception, e:
logger.warn('Mail.send failure:%s' % e)
self.result = result
self.error = e
return False
self.result = result
self.error = None
return True
class Recaptcha(DIV):
"""
Examples:
Use as::
form = FORM(Recaptcha(public_key='...',private_key='...'))
or::
form = SQLFORM(...)
form.append(Recaptcha(public_key='...',private_key='...'))
"""
API_SSL_SERVER = 'https://www.google.com/recaptcha/api'
API_SERVER = 'http://www.google.com/recaptcha/api'
VERIFY_SERVER = 'http://www.google.com/recaptcha/api/verify'
def __init__(self,
request=None,
public_key='',
private_key='',
use_ssl=False,
error=None,
error_message='invalid',
label='Verify:',
options='',
comment='',
ajax=False
):
request = request or current.request
self.request_vars = request and request.vars or current.request.vars
self.remote_addr = request.env.remote_addr
self.public_key = public_key
self.private_key = private_key
self.use_ssl = use_ssl
self.error = error
self.errors = Storage()
self.error_message = error_message
self.components = []
self.attributes = {}
self.label = label
self.options = options
self.comment = comment
self.ajax = ajax
def _validate(self):
# for local testing:
recaptcha_challenge_field = \
self.request_vars.recaptcha_challenge_field
recaptcha_response_field = \
self.request_vars.recaptcha_response_field
private_key = self.private_key
remoteip = self.remote_addr
if not (recaptcha_response_field and recaptcha_challenge_field
and len(recaptcha_response_field)
and len(recaptcha_challenge_field)):
self.errors['captcha'] = self.error_message
return False
params = urllib.urlencode({
'privatekey': private_key,
'remoteip': remoteip,
'challenge': recaptcha_challenge_field,
'response': recaptcha_response_field,
})
request = urllib2.Request(
url=self.VERIFY_SERVER,
data=params,
headers={'Content-type': 'application/x-www-form-urlencoded',
'User-agent': 'reCAPTCHA Python'})
httpresp = urllib2.urlopen(request)
return_values = httpresp.read().splitlines()
httpresp.close()
return_code = return_values[0]
if return_code == 'true':
del self.request_vars.recaptcha_challenge_field
del self.request_vars.recaptcha_response_field
self.request_vars.captcha = ''
return True
else:
# In case we get an error code, store it so we can get an error message
# from the /api/challenge URL as described in the reCAPTCHA api docs.
self.error = return_values[1]
self.errors['captcha'] = self.error_message
return False
def xml(self):
public_key = self.public_key
use_ssl = self.use_ssl
error_param = ''
if self.error:
error_param = '&error=%s' % self.error
if use_ssl:
server = self.API_SSL_SERVER
else:
server = self.API_SERVER
if not self.ajax:
captcha = DIV(
SCRIPT("var RecaptchaOptions = {%s};" % self.options),
SCRIPT(_type="text/javascript",
_src="%s/challenge?k=%s%s" % (server, public_key, error_param)),
TAG.noscript(
IFRAME(
_src="%s/noscript?k=%s%s" % (
server, public_key, error_param),
_height="300", _width="500", _frameborder="0"), BR(),
INPUT(
_type='hidden', _name='recaptcha_response_field',
_value='manual_challenge')), _id='recaptcha')
        else:  # use Google's ajax interface, needed for LOAD()-ed components
url_recaptcha_js = "%s/js/recaptcha_ajax.js" % server
RecaptchaOptions = "var RecaptchaOptions = {%s}" % self.options
script = """%(options)s;
jQuery.getScript('%(url)s',function() {
Recaptcha.create('%(public_key)s',
'recaptcha',jQuery.extend(RecaptchaOptions,{'callback':Recaptcha.focus_response_field}))
}) """ % ({'options': RecaptchaOptions, 'url': url_recaptcha_js, 'public_key': public_key})
captcha = DIV(
SCRIPT(
script,
_type="text/javascript",
),
TAG.noscript(
IFRAME(
_src="%s/noscript?k=%s%s" % (
server, public_key, error_param),
_height="300", _width="500", _frameborder="0"), BR(),
INPUT(
_type='hidden', _name='recaptcha_response_field',
_value='manual_challenge')), _id='recaptcha')
if not self.errors.captcha:
return XML(captcha).xml()
else:
captcha.append(DIV(self.errors['captcha'], _class='error'))
return XML(captcha).xml()
# this should only be used for captcha and perhaps not even for that
def addrow(form, a, b, c, style, _id, position=-1):
if style == "divs":
form[0].insert(position, DIV(DIV(LABEL(a), _class='w2p_fl'),
DIV(b, _class='w2p_fw'),
DIV(c, _class='w2p_fc'),
_id=_id))
elif style == "table2cols":
form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
TD(c, _class='w2p_fc')))
form[0].insert(position + 1, TR(TD(b, _class='w2p_fw'),
_colspan=2, _id=_id))
elif style == "ul":
form[0].insert(position, LI(DIV(LABEL(a), _class='w2p_fl'),
DIV(b, _class='w2p_fw'),
DIV(c, _class='w2p_fc'),
_id=_id))
elif style == "bootstrap":
form[0].insert(position, DIV(LABEL(a, _class='control-label'),
DIV(b, SPAN(c, _class='inline-help'),
_class='controls'),
_class='control-group', _id=_id))
else:
form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
TD(b, _class='w2p_fw'),
TD(c, _class='w2p_fc'), _id=_id))
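# Illustrative only -- how addrow is used later in this module to graft a
# captcha row onto a SQLFORM (see Auth.login below):
#
#     addrow(form, captcha.label, captcha, captcha.comment,
#            settings.formstyle, 'captcha__row')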
class Auth(object):
default_settings = dict(
hideerror=False,
password_min_length=4,
cas_maps=None,
reset_password_requires_verification=False,
registration_requires_verification=False,
registration_requires_approval=False,
login_after_registration=False,
login_after_password_change=True,
alternate_requires_registration=False,
create_user_groups="user_%(id)s",
everybody_group_id=None,
manager_actions={},
auth_manager_role=None,
        two_factor_authentication_group=None,
login_captcha=None,
register_captcha=None,
pre_registration_div=None,
retrieve_username_captcha=None,
retrieve_password_captcha=None,
captcha=None,
prevent_open_redirect_attacks=True,
prevent_password_reset_attacks=True,
expiration=3600, # one hour
long_expiration=3600 * 30 * 24, # one month
remember_me_form=True,
allow_basic_login=False,
allow_basic_login_only=False,
on_failed_authentication=lambda x: redirect(x),
formstyle=None,
label_separator=None,
        logging_enabled=True,
allow_delete_accounts=False,
password_field='password',
table_user_name='auth_user',
table_group_name='auth_group',
table_membership_name='auth_membership',
table_permission_name='auth_permission',
table_event_name='auth_event',
table_cas_name='auth_cas',
table_user=None,
table_group=None,
table_membership=None,
table_permission=None,
table_event=None,
table_cas=None,
showid=False,
use_username=False,
login_email_validate=True,
login_userfield=None,
multi_login=False,
logout_onlogout=None,
register_fields=None,
register_verify_password=True,
profile_fields=None,
email_case_sensitive=True,
username_case_sensitive=True,
update_fields=['email'],
ondelete="CASCADE",
client_side=True,
renew_session_onlogin=True,
renew_session_onlogout=True,
keep_session_onlogin=True,
keep_session_onlogout=False,
wiki=Settings(),
)
# ## these are messages that can be customized
default_messages = dict(
login_button='Log In',
register_button='Sign Up',
password_reset_button='Request reset password',
password_change_button='Change password',
profile_save_button='Apply changes',
submit_button='Submit',
verify_password='Verify Password',
delete_label='Check to delete',
function_disabled='Function disabled',
access_denied='Insufficient privileges',
registration_verifying='Registration needs verification',
registration_pending='Registration is pending approval',
email_taken='This email already has an account',
invalid_username='Invalid username',
username_taken='Username already taken',
login_disabled='Login disabled by administrator',
logged_in='Logged in',
email_sent='Email sent',
unable_to_send_email='Unable to send email',
email_verified='Email verified',
logged_out='Logged out',
registration_successful='Registration successful',
invalid_email='Invalid email',
unable_send_email='Unable to send email',
invalid_login='Invalid login',
invalid_user='Invalid user',
invalid_password='Invalid password',
is_empty="Cannot be empty",
mismatched_password="Password fields don't match",
verify_email='Welcome %(username)s! Click on the link %(link)s to verify your email',
verify_email_subject='Email verification',
username_sent='Your username was emailed to you',
new_password_sent='A new password was emailed to you',
password_changed='Password changed',
retrieve_username='Your username is: %(username)s',
        retrieve_username_subject='Username retrieval',
retrieve_password='Your password is: %(password)s',
        retrieve_password_subject='Password retrieval',
reset_password='Click on the link %(link)s to reset your password',
reset_password_subject='Password reset',
invalid_reset_password='Invalid reset password',
profile_updated='Profile updated',
new_password='New password',
old_password='Old password',
group_description='Group uniquely assigned to user %(id)s',
register_log='User %(id)s Registered',
login_log='User %(id)s Logged-in',
login_failed_log=None,
logout_log='User %(id)s Logged-out',
profile_log='User %(id)s Profile updated',
verify_email_log='User %(id)s Verification email sent',
retrieve_username_log='User %(id)s Username retrieved',
retrieve_password_log='User %(id)s Password retrieved',
reset_password_log='User %(id)s Password reset',
change_password_log='User %(id)s Password changed',
add_group_log='Group %(group_id)s created',
del_group_log='Group %(group_id)s deleted',
add_membership_log=None,
del_membership_log=None,
has_membership_log=None,
add_permission_log=None,
del_permission_log=None,
has_permission_log=None,
impersonate_log='User %(id)s is impersonating %(other_id)s',
label_first_name='First name',
label_last_name='Last name',
label_username='Username',
label_email='E-mail',
label_password='Password',
label_registration_key='Registration key',
label_reset_password_key='Reset Password key',
label_registration_id='Registration identifier',
label_role='Role',
label_description='Description',
label_user_id='User ID',
label_group_id='Group ID',
label_name='Name',
label_table_name='Object or table name',
label_record_id='Record ID',
label_time_stamp='Timestamp',
label_client_ip='Client IP',
label_origin='Origin',
label_remember_me="Remember me (for 30 days)",
verify_password_comment='please input your password again',
)
"""
Class for authentication, authorization, role based access control.
Includes:
- registration and profile
- login and logout
- username and password retrieval
- event logging
- role creation and assignment
- user defined group/role based permission
Args:
        environment: kept for legacy reasons but unused (awful)
        db: the database on which to create the authentication tables
        mailer: `Mail(...)`, or None (no mailer), or True (make a mailer)
        hmac_key: can be an hmac_key or hmac_key=Auth.get_or_create_key()
        controller: (where is the user action?)
        cas_provider: (delegate authentication to this URL, CAS2)
Authentication Example::
        from gluon.tools import Mail, Auth
mail=Mail()
mail.settings.server='smtp.gmail.com:587'
mail.settings.sender='you@somewhere.com'
mail.settings.login='username:password'
auth=Auth(db)
auth.settings.mailer=mail
# auth.settings....=...
auth.define_tables()
def authentication():
return dict(form=auth())
Exposes:
- `http://.../{application}/{controller}/authentication/login`
- `http://.../{application}/{controller}/authentication/logout`
- `http://.../{application}/{controller}/authentication/register`
- `http://.../{application}/{controller}/authentication/verify_email`
- `http://.../{application}/{controller}/authentication/retrieve_username`
- `http://.../{application}/{controller}/authentication/retrieve_password`
- `http://.../{application}/{controller}/authentication/reset_password`
- `http://.../{application}/{controller}/authentication/profile`
- `http://.../{application}/{controller}/authentication/change_password`
On registration a group with role=new_user.id is created
and user is given membership of this group.
You can create a group with::
group_id=auth.add_group('Manager', 'can access the manage action')
auth.add_permission(group_id, 'access to manage')
Here "access to manage" is just a user defined string.
You can give access to a user::
auth.add_membership(group_id, user_id)
If user id is omitted, the logged in user is assumed
Then you can decorate any action::
@auth.requires_permission('access to manage')
def manage():
return dict()
You can restrict a permission to a specific table::
auth.add_permission(group_id, 'edit', db.sometable)
@auth.requires_permission('edit', db.sometable)
Or to a specific record::
auth.add_permission(group_id, 'edit', db.sometable, 45)
@auth.requires_permission('edit', db.sometable, 45)
If authorization is not granted calls::
auth.settings.on_failed_authorization
Other options::
auth.settings.mailer=None
auth.settings.expiration=3600 # seconds
...
### these are messages that can be customized
...
"""
@staticmethod
def get_or_create_key(filename=None, alg='sha512'):
request = current.request
if not filename:
filename = os.path.join(request.folder, 'private', 'auth.key')
if os.path.exists(filename):
key = open(filename, 'r').read().strip()
else:
key = alg + ':' + web2py_uuid()
open(filename, 'w').write(key)
return key
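    # Illustrative only -- typical use of get_or_create_key when
    # constructing Auth (mirrors the class docstring above):
    #
    #     auth = Auth(db, hmac_key=Auth.get_or_create_key())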
def url(self, f=None, args=None, vars=None, scheme=False):
if args is None:
args = []
if vars is None:
vars = {}
return URL(c=self.settings.controller,
f=f, args=args, vars=vars, scheme=scheme)
def here(self):
return URL(args=current.request.args, vars=current.request.get_vars)
def __init__(self, environment=None, db=None, mailer=True,
hmac_key=None, controller='default', function='user',
cas_provider=None, signature=True, secure=False,
csrf_prevention=True, propagate_extension=None,
url_index=None):
## next two lines for backward compatibility
if not db and environment and isinstance(environment, DAL):
db = environment
self.db = db
self.environment = current
self.csrf_prevention = csrf_prevention
request = current.request
session = current.session
auth = session.auth
self.user_groups = auth and auth.user_groups or {}
if secure:
request.requires_https()
now = request.now
# if we have auth info
# if not expired it, used it
# if expired, clear the session
# else, only clear auth info in the session
if auth:
delta = datetime.timedelta(days=0, seconds=auth.expiration)
if auth.last_visit and auth.last_visit + delta > now:
self.user = auth.user
# this is a trick to speed up sessions to avoid many writes
if (now - auth.last_visit).seconds > (auth.expiration / 10):
auth.last_visit = request.now
else:
self.user = None
if session.auth:
del session.auth
session.renew(clear_session=True)
else:
self.user = None
if session.auth:
del session.auth
# ## what happens after login?
url_index = url_index or URL(controller, 'index')
url_login = URL(controller, function, args='login',
extension = propagate_extension)
# ## what happens after registration?
settings = self.settings = Settings()
settings.update(Auth.default_settings)
settings.update(
cas_domains=[request.env.http_host],
cas_provider=cas_provider,
cas_actions=dict(login='login',
validate='validate',
servicevalidate='serviceValidate',
proxyvalidate='proxyValidate',
logout='logout'),
extra_fields={},
actions_disabled=[],
controller=controller,
function=function,
login_url=url_login,
logged_url=URL(controller, function, args='profile'),
download_url=URL(controller, 'download'),
mailer=(mailer is True) and Mail() or mailer,
on_failed_authorization =
URL(controller, function, args='not_authorized'),
login_next = url_index,
login_onvalidation = [],
login_onaccept = [],
login_onfail = [],
login_methods = [self],
login_form = self,
logout_next = url_index,
logout_onlogout = None,
register_next = url_index,
register_onvalidation = [],
register_onaccept = [],
verify_email_next = url_login,
verify_email_onaccept = [],
profile_next = url_index,
profile_onvalidation = [],
profile_onaccept = [],
retrieve_username_next = url_index,
retrieve_password_next = url_index,
request_reset_password_next = url_login,
reset_password_next = url_index,
change_password_next = url_index,
change_password_onvalidation = [],
change_password_onaccept = [],
retrieve_password_onvalidation = [],
reset_password_onvalidation = [],
reset_password_onaccept = [],
hmac_key = hmac_key,
formstyle = current.response.formstyle,
label_separator = current.response.form_label_separator
)
settings.lock_keys = True
# ## these are messages that can be customized
messages = self.messages = Messages(current.T)
messages.update(Auth.default_messages)
messages.update(ajax_failed_authentication=
DIV(H4('NOT AUTHORIZED'),
'Please ',
A('login',
_href=self.settings.login_url +
('?_next=' + urllib.quote(current.request.env.http_web2py_component_location))
if current.request.env.http_web2py_component_location else ''),
' to view this content.',
_class='not-authorized alert alert-block'))
messages.lock_keys = True
# for "remember me" option
response = current.response
if auth and auth.remember_me:
# when user wants to be logged in for longer
response.session_cookie_expires = auth.expiration
if signature:
self.define_signature()
else:
self.signature = None
def get_vars_next(self):
next = current.request.vars._next
if isinstance(next, (list, tuple)):
next = next[0]
return next
def _get_user_id(self):
"""accessor for auth.user_id"""
return self.user and self.user.id or None
user_id = property(_get_user_id, doc="user.id or None")
def table_user(self):
return self.db[self.settings.table_user_name]
def table_group(self):
return self.db[self.settings.table_group_name]
def table_membership(self):
return self.db[self.settings.table_membership_name]
def table_permission(self):
return self.db[self.settings.table_permission_name]
def table_event(self):
return self.db[self.settings.table_event_name]
def table_cas(self):
return self.db[self.settings.table_cas_name]
def _HTTP(self, *a, **b):
"""
only used in lambda: self._HTTP(404)
"""
raise HTTP(*a, **b)
def __call__(self):
"""
Example:
Use as::
def authentication():
return dict(form=auth())
"""
request = current.request
args = request.args
if not args:
redirect(self.url(args='login', vars=request.vars))
elif args[0] in self.settings.actions_disabled:
raise HTTP(404)
if args[0] in ('login', 'logout', 'register', 'verify_email',
'retrieve_username', 'retrieve_password',
'reset_password', 'request_reset_password',
'change_password', 'profile', 'groups',
'impersonate', 'not_authorized'):
if len(request.args) >= 2 and args[0] == 'impersonate':
return getattr(self, args[0])(request.args[1])
else:
return getattr(self, args[0])()
elif args[0] == 'cas' and not self.settings.cas_provider:
if args(1) == self.settings.cas_actions['login']:
return self.cas_login(version=2)
elif args(1) == self.settings.cas_actions['validate']:
return self.cas_validate(version=1)
elif args(1) == self.settings.cas_actions['servicevalidate']:
return self.cas_validate(version=2, proxy=False)
elif args(1) == self.settings.cas_actions['proxyvalidate']:
return self.cas_validate(version=2, proxy=True)
elif args(1) == self.settings.cas_actions['logout']:
return self.logout(next=request.vars.service or DEFAULT)
else:
raise HTTP(404)
def navbar(self, prefix='Welcome', action=None,
separators=(' [ ', ' | ', ' ] '), user_identifier=DEFAULT,
referrer_actions=DEFAULT, mode='default'):
""" Navbar with support for more templates
This uses some code from the old navbar.
Args:
mode: see options for list of
"""
items = [] # Hold all menu items in a list
        self.bar = ''  # the final navbar, built below from `items`
T = current.T
referrer_actions = [] if not referrer_actions else referrer_actions
if not action:
action = self.url(self.settings.function)
request = current.request
if URL() == action:
next = ''
else:
next = '?_next=' + urllib.quote(URL(args=request.args,
vars=request.get_vars))
href = lambda function: '%s/%s%s' % (action, function, next
if referrer_actions is DEFAULT
or function in referrer_actions
else '')
if isinstance(prefix, str):
prefix = T(prefix)
if prefix:
prefix = prefix.strip() + ' '
def Anr(*a, **b):
b['_rel'] = 'nofollow'
return A(*a, **b)
if self.user_id: # User is logged in
logout_next = self.settings.logout_next
items.append({'name': T('Log Out'),
'href': '%s/logout?_next=%s' % (action,
urllib.quote(
logout_next)),
'icon': 'icon-off'})
if not 'profile' in self.settings.actions_disabled:
items.append({'name': T('Profile'), 'href': href('profile'),
'icon': 'icon-user'})
if not 'change_password' in self.settings.actions_disabled:
items.append({'name': T('Password'),
'href': href('change_password'),
'icon': 'icon-lock'})
if user_identifier is DEFAULT:
user_identifier = '%(first_name)s'
if callable(user_identifier):
user_identifier = user_identifier(self.user)
elif ((isinstance(user_identifier, str) or
type(user_identifier).__name__ == 'lazyT') and
re.search(r'%\(.+\)s', user_identifier)):
user_identifier = user_identifier % self.user
if not user_identifier:
user_identifier = ''
else: # User is not logged in
items.append({'name': T('Log In'), 'href': href('login'),
'icon': 'icon-off'})
if not 'register' in self.settings.actions_disabled:
items.append({'name': T('Sign Up'), 'href': href('register'),
'icon': 'icon-user'})
if not 'request_reset_password' in self.settings.actions_disabled:
items.append({'name': T('Lost password?'),
'href': href('request_reset_password'),
'icon': 'icon-lock'})
if (self.settings.use_username and not
'retrieve_username' in self.settings.actions_disabled):
items.append({'name': T('Forgot username?'),
'href': href('retrieve_username'),
'icon': 'icon-edit'})
def menu(): # For inclusion in MENU
self.bar = [(items[0]['name'], False, items[0]['href'], [])]
del items[0]
for item in items:
self.bar[0][3].append((item['name'], False, item['href']))
def bootstrap3(): # Default web2py scaffolding
def rename(icon): return icon+' '+icon.replace('icon', 'glyphicon')
self.bar = UL(LI(Anr(I(_class=rename('icon '+items[0]['icon'])),
' ' + items[0]['name'],
_href=items[0]['href'])), _class='dropdown-menu')
del items[0]
for item in items:
self.bar.insert(-1, LI(Anr(I(_class=rename('icon '+item['icon'])),
' ' + item['name'],
_href=item['href'])))
self.bar.insert(-1, LI('', _class='divider'))
if self.user_id:
self.bar = LI(Anr(prefix, user_identifier,
_href='#', _class="dropdown-toggle",
data={'toggle': 'dropdown'}),
self.bar, _class='dropdown')
else:
self.bar = LI(Anr(T('Log In'),
_href='#', _class="dropdown-toggle",
data={'toggle': 'dropdown'}), self.bar,
_class='dropdown')
def bare():
""" In order to do advanced customization we only need the
prefix, the user_identifier and the href attribute of items
Examples:
Use as::
# in module custom_layout.py
from gluon import *
def navbar(auth_navbar):
bar = auth_navbar
user = bar["user"]
if not user:
btn_login = A(current.T("Login"),
_href=bar["login"],
_class="btn btn-success",
_rel="nofollow")
btn_register = A(current.T("Sign up"),
_href=bar["register"],
_class="btn btn-primary",
_rel="nofollow")
return DIV(btn_register, btn_login, _class="btn-group")
else:
toggletext = "%s back %s" % (bar["prefix"], user)
toggle = A(toggletext,
_href="#",
_class="dropdown-toggle",
_rel="nofollow",
**{"_data-toggle": "dropdown"})
li_profile = LI(A(I(_class="icon-user"), ' ',
current.T("Account details"),
_href=bar["profile"], _rel="nofollow"))
li_custom = LI(A(I(_class="icon-book"), ' ',
current.T("My Agenda"),
_href="#", rel="nofollow"))
li_logout = LI(A(I(_class="icon-off"), ' ',
current.T("logout"),
_href=bar["logout"], _rel="nofollow"))
dropdown = UL(li_profile,
li_custom,
LI('', _class="divider"),
li_logout,
_class="dropdown-menu", _role="menu")
return LI(toggle, dropdown, _class="dropdown")
# in models db.py
import custom_layout as custom
# in layout.html
<ul id="navbar" class="nav pull-right">
{{='auth' in globals() and \
custom.navbar(auth.navbar(mode='bare')) or ''}}</ul>
"""
bare = {}
bare['prefix'] = prefix
bare['user'] = user_identifier if self.user_id else None
for i in items:
if i['name'] == T('Log In'):
k = 'login'
elif i['name'] == T('Sign Up'):
k = 'register'
elif i['name'] == T('Lost password?'):
k = 'request_reset_password'
elif i['name'] == T('Forgot username?'):
k = 'retrieve_username'
elif i['name'] == T('Log Out'):
k = 'logout'
elif i['name'] == T('Profile'):
k = 'profile'
elif i['name'] == T('Password'):
k = 'change_password'
bare[k] = i['href']
self.bar = bare
options = {'asmenu': menu,
'dropdown': bootstrap3,
'bare': bare
} # Define custom modes.
if mode in options and callable(options[mode]):
options[mode]()
else:
s1, s2, s3 = separators
if self.user_id:
self.bar = SPAN(prefix, user_identifier, s1,
Anr(items[0]['name'],
_href=items[0]['href']), s3,
_class='auth_navbar')
else:
self.bar = SPAN(s1, Anr(items[0]['name'],
_href=items[0]['href']), s3,
_class='auth_navbar')
for item in items[1:]:
self.bar.insert(-1, s2)
self.bar.insert(-1, Anr(item['name'], _href=item['href']))
return self.bar
def __get_migrate(self, tablename, migrate=True):
        if isinstance(migrate, str):
            return (migrate + tablename + '.table')
        elif migrate is False:
            return False
        else:
            return True
def enable_record_versioning(self,
tables,
archive_db=None,
archive_names='%(tablename)s_archive',
current_record='current_record',
current_record_label=None):
"""
Used to enable full record versioning (including auth tables)::
auth = Auth(db)
auth.define_tables(signature=True)
# define our own tables
db.define_table('mything',Field('name'),auth.signature)
auth.enable_record_versioning(tables=db)
        `tables` can be the db (all tables) or a list of tables.
        Only tables with `modified_by` and `modified_on` fields (as created
        by auth.signature) will have versioning. Old record versions will be
        kept in the automatically defined table 'mything_archive'.

        When record versioning is enabled, records are never
        deleted but marked with is_active=False.

        enable_record_versioning enables a common_filter for
        every table that filters out records with is_active=False

        Note:
            If you use auth.enable_record_versioning,
            do not use auth.archive or you will end up with duplicates.
            auth.archive does explicitly what enable_record_versioning
            does automatically.
"""
current_record_label = current_record_label or current.T(
current_record.replace('_', ' ').title())
for table in tables:
fieldnames = table.fields()
if ('id' in fieldnames and
'modified_on' in fieldnames and
not current_record in fieldnames):
table._enable_record_versioning(
archive_db=archive_db,
archive_name=archive_names,
current_record=current_record,
current_record_label=current_record_label)
def define_signature(self):
db = self.db
settings = self.settings
request = current.request
T = current.T
reference_user = 'reference %s' % settings.table_user_name
def lazy_user(auth=self):
return auth.user_id
def represent(id, record=None, s=settings):
try:
user = s.table_user(id)
return '%s %s' % (user.get("first_name", user.get("email")),
user.get("last_name", ''))
except:
return id
ondelete = self.settings.ondelete
self.signature = Table(
self.db, 'auth_signature',
Field('is_active', 'boolean',
default=True,
readable=False, writable=False,
label=T('Is Active')),
Field('created_on', 'datetime',
default=request.now,
writable=False, readable=False,
label=T('Created On')),
Field('created_by',
reference_user,
default=lazy_user, represent=represent,
writable=False, readable=False,
label=T('Created By'), ondelete=ondelete),
Field('modified_on', 'datetime',
update=request.now, default=request.now,
writable=False, readable=False,
label=T('Modified On')),
Field('modified_by',
reference_user, represent=represent,
default=lazy_user, update=lazy_user,
writable=False, readable=False,
label=T('Modified By'), ondelete=ondelete))
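    # Illustrative only -- attaching the signature defined above to a
    # user-defined table (same pattern as in the enable_record_versioning
    # docstring):
    #
    #     db.define_table('mything', Field('name'), auth.signature)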
def define_tables(self, username=None, signature=None,
migrate=None, fake_migrate=None):
"""
To be called unless tables are defined manually
Examples:
Use as::
# defines all needed tables and table files
# 'myprefix_auth_user.table', ...
auth.define_tables(migrate='myprefix_')
# defines all needed tables without migration/table files
auth.define_tables(migrate=False)
"""
db = self.db
if migrate is None:
migrate = db._migrate
if fake_migrate is None:
fake_migrate = db._fake_migrate
settings = self.settings
if username is None:
username = settings.use_username
else:
settings.use_username = username
if not self.signature:
self.define_signature()
        if signature is True:
signature_list = [self.signature]
elif not signature:
signature_list = []
elif isinstance(signature, Table):
signature_list = [signature]
else:
signature_list = signature
is_not_empty = IS_NOT_EMPTY(error_message=self.messages.is_empty)
is_crypted = CRYPT(key=settings.hmac_key,
min_length=settings.password_min_length)
is_unique_email = [
IS_EMAIL(error_message=self.messages.invalid_email),
IS_NOT_IN_DB(db, '%s.email' % settings.table_user_name,
error_message=self.messages.email_taken)]
if not settings.email_case_sensitive:
is_unique_email.insert(1, IS_LOWER())
if not settings.table_user_name in db.tables:
passfield = settings.password_field
extra_fields = settings.extra_fields.get(
settings.table_user_name, []) + signature_list
if username or settings.cas_provider:
is_unique_username = \
                [IS_MATCH(r'[\w\.\-]+', strict=True,
error_message=self.messages.invalid_username),
IS_NOT_IN_DB(db, '%s.username' % settings.table_user_name,
error_message=self.messages.username_taken)]
if not settings.username_case_sensitive:
is_unique_username.insert(1, IS_LOWER())
db.define_table(
settings.table_user_name,
Field('first_name', length=128, default='',
label=self.messages.label_first_name,
requires=is_not_empty),
Field('last_name', length=128, default='',
label=self.messages.label_last_name,
requires=is_not_empty),
Field('email', length=512, default='',
label=self.messages.label_email,
requires=is_unique_email),
Field('username', length=128, default='',
label=self.messages.label_username,
requires=is_unique_username),
Field(passfield, 'password', length=512,
readable=False, label=self.messages.label_password,
requires=[is_crypted]),
Field('registration_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_key),
Field('reset_password_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_reset_password_key),
Field('registration_id', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_id),
*extra_fields,
**dict(
migrate=self.__get_migrate(settings.table_user_name,
migrate),
fake_migrate=fake_migrate,
format='%(username)s'))
else:
db.define_table(
settings.table_user_name,
Field('first_name', length=128, default='',
label=self.messages.label_first_name,
requires=is_not_empty),
Field('last_name', length=128, default='',
label=self.messages.label_last_name,
requires=is_not_empty),
Field('email', length=512, default='',
label=self.messages.label_email,
requires=is_unique_email),
Field(passfield, 'password', length=512,
readable=False, label=self.messages.label_password,
requires=[is_crypted]),
Field('registration_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_key),
Field('reset_password_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_reset_password_key),
Field('registration_id', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_id),
*extra_fields,
**dict(
migrate=self.__get_migrate(settings.table_user_name,
migrate),
fake_migrate=fake_migrate,
format='%(first_name)s %(last_name)s (%(id)s)'))
reference_table_user = 'reference %s' % settings.table_user_name
if not settings.table_group_name in db.tables:
extra_fields = settings.extra_fields.get(
settings.table_group_name, []) + signature_list
db.define_table(
settings.table_group_name,
Field('role', length=512, default='',
label=self.messages.label_role,
requires=IS_NOT_IN_DB(db, '%s.role' % settings.table_group_name)),
Field('description', 'text',
label=self.messages.label_description),
*extra_fields,
**dict(
migrate=self.__get_migrate(
settings.table_group_name, migrate),
fake_migrate=fake_migrate,
format='%(role)s (%(id)s)'))
reference_table_group = 'reference %s' % settings.table_group_name
if not settings.table_membership_name in db.tables:
extra_fields = settings.extra_fields.get(
settings.table_membership_name, []) + signature_list
db.define_table(
settings.table_membership_name,
Field('user_id', reference_table_user,
label=self.messages.label_user_id),
Field('group_id', reference_table_group,
label=self.messages.label_group_id),
*extra_fields,
**dict(
migrate=self.__get_migrate(
settings.table_membership_name, migrate),
fake_migrate=fake_migrate))
if not settings.table_permission_name in db.tables:
extra_fields = settings.extra_fields.get(
settings.table_permission_name, []) + signature_list
db.define_table(
settings.table_permission_name,
Field('group_id', reference_table_group,
label=self.messages.label_group_id),
Field('name', default='default', length=512,
label=self.messages.label_name,
requires=is_not_empty),
Field('table_name', length=512,
label=self.messages.label_table_name),
Field('record_id', 'integer', default=0,
label=self.messages.label_record_id,
requires=IS_INT_IN_RANGE(0, 10 ** 9)),
*extra_fields,
**dict(
migrate=self.__get_migrate(
settings.table_permission_name, migrate),
fake_migrate=fake_migrate))
if not settings.table_event_name in db.tables:
db.define_table(
settings.table_event_name,
Field('time_stamp', 'datetime',
default=current.request.now,
label=self.messages.label_time_stamp),
Field('client_ip',
default=current.request.client,
label=self.messages.label_client_ip),
Field('user_id', reference_table_user, default=None,
label=self.messages.label_user_id),
Field('origin', default='auth', length=512,
label=self.messages.label_origin,
requires=is_not_empty),
Field('description', 'text', default='',
label=self.messages.label_description,
requires=is_not_empty),
*settings.extra_fields.get(settings.table_event_name, []),
**dict(
migrate=self.__get_migrate(
settings.table_event_name, migrate),
fake_migrate=fake_migrate))
now = current.request.now
if settings.cas_domains:
if not settings.table_cas_name in db.tables:
db.define_table(
settings.table_cas_name,
Field('user_id', reference_table_user, default=None,
label=self.messages.label_user_id),
Field('created_on', 'datetime', default=now),
Field('service', requires=IS_URL()),
Field('ticket'),
Field('renew', 'boolean', default=False),
*settings.extra_fields.get(settings.table_cas_name, []),
**dict(
migrate=self.__get_migrate(
settings.table_cas_name, migrate),
fake_migrate=fake_migrate))
if not db._lazy_tables:
settings.table_user = db[settings.table_user_name]
settings.table_group = db[settings.table_group_name]
settings.table_membership = db[settings.table_membership_name]
settings.table_permission = db[settings.table_permission_name]
settings.table_event = db[settings.table_event_name]
if settings.cas_domains:
settings.table_cas = db[settings.table_cas_name]
if settings.cas_provider: # THIS IS NOT LAZY
settings.actions_disabled = \
['profile', 'register', 'change_password',
'request_reset_password', 'retrieve_username']
from gluon.contrib.login_methods.cas_auth import CasAuth
maps = settings.cas_maps
if not maps:
table_user = self.table_user()
maps = dict((name, lambda v, n=name: v.get(n, None)) for name in
table_user.fields if name != 'id'
and table_user[name].readable)
maps['registration_id'] = \
lambda v, p=settings.cas_provider: '%s/%s' % (p, v['user'])
actions = [settings.cas_actions['login'],
settings.cas_actions['servicevalidate'],
settings.cas_actions['logout']]
settings.login_form = CasAuth(
casversion=2,
urlbase=settings.cas_provider,
actions=actions,
maps=maps)
return self
def log_event(self, description, vars=None, origin='auth'):
"""
Examples:
Use as::
auth.log_event(description='this happened', origin='auth')
"""
if not self.settings.logging_enabled or not description:
return
elif self.is_logged_in():
user_id = self.user.id
else:
user_id = None # user unknown
vars = vars or {}
# log messages should not be translated
if type(description).__name__ == 'lazyT':
description = description.m
self.table_event().insert(
description=str(description % vars),
origin=origin, user_id=user_id)
def get_or_create_user(self, keys, update_fields=['email'],
login=True, get=True):
"""
Used for alternate login methods:
        If the user already exists, then the password is updated.
If the user doesn't yet exist, then they are created.
"""
table_user = self.table_user()
user = None
checks = []
# make a guess about who this user is
for fieldname in ['registration_id', 'username', 'email']:
if fieldname in table_user.fields() and \
keys.get(fieldname, None):
checks.append(fieldname)
value = keys[fieldname]
user = table_user(**{fieldname: value})
if user:
break
if not checks:
return None
if not 'registration_id' in keys:
keys['registration_id'] = keys[checks[0]]
# if we think we found the user but registration_id does not match,
# make new user
if 'registration_id' in checks \
and user \
and user.registration_id \
and ('registration_id' not in keys or user.registration_id != str(keys['registration_id'])):
user = None # THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER?
if user:
if not get:
# added for register_bare to avoid overwriting users
return None
update_keys = dict(registration_id=keys['registration_id'])
for key in update_fields:
if key in keys:
update_keys[key] = keys[key]
user.update_record(**update_keys)
elif checks:
if not 'first_name' in keys and 'first_name' in table_user.fields:
guess = keys.get('email', 'anonymous').split('@')[0]
keys['first_name'] = keys.get('username', guess)
user_id = table_user.insert(**table_user._filter_fields(keys))
user = table_user[user_id]
if self.settings.create_user_groups:
group_id = self.add_group(
self.settings.create_user_groups % user)
self.add_membership(group_id, user_id)
if self.settings.everybody_group_id:
self.add_membership(self.settings.everybody_group_id, user_id)
if login:
self.user = user
return user
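    # Illustrative only -- how an alternate login method might feed this
    # helper (field values are placeholders):
    #
    #     user = auth.get_or_create_user(
    #         dict(email='who@example.com', registration_id='provider/uid'),
    #         update_fields=['email'])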
def basic(self, basic_auth_realm=False):
"""
Performs basic login.
Args:
basic_auth_realm: optional basic http authentication realm. Can take
str or unicode or function or callable or boolean.
reads current.request.env.http_authorization
and returns basic_allowed,basic_accepted,user.
if basic_auth_realm is defined is a callable it's return value
is used to set the basic authentication realm, if it's a string
its content is used instead. Otherwise basic authentication realm
is set to the application name.
If basic_auth_realm is None or False (the default) the behavior
is to skip sending any challenge.
"""
if not self.settings.allow_basic_login:
return (False, False, False)
basic = current.request.env.http_authorization
        if basic_auth_realm:
            if callable(basic_auth_realm):
                basic_auth_realm = basic_auth_realm()
            if isinstance(basic_auth_realm, (unicode, str)):
                basic_realm = unicode(basic_auth_realm)
            else:
                basic_realm = u'' + current.request.application
            http_401 = HTTP(401, u'Not Authorized',
                            **{'WWW-Authenticate': u'Basic realm="' + basic_realm + '"'})
        if not basic or basic[:6].lower() != 'basic ':
if basic_auth_realm:
raise http_401
return (True, False, False)
(username, sep, password) = base64.b64decode(basic[6:]).partition(':')
is_valid_user = sep and self.login_bare(username, password)
if not is_valid_user and basic_auth_realm:
raise http_401
return (True, True, is_valid_user)
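    # Illustrative only -- enabling basic login so this method will accept
    # an "Authorization: Basic ..." header:
    #
    #     auth.settings.allow_basic_login = True
    #     allowed, accepted, user = auth.basic(basic_auth_realm=True)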
def login_user(self, user):
"""
        Logs in the user, where `user = db.auth_user(id)`
"""
from gluon.settings import global_settings
if global_settings.web2py_runtime_gae:
user = Row(self.table_user()._filter_fields(user, id=True))
delattr(user, 'password')
else:
user = Row(user)
for key, value in user.items():
if callable(value) or key == 'password':
delattr(user, key)
if self.settings.renew_session_onlogin:
current.session.renew(clear_session=not self.settings.keep_session_onlogin)
current.session.auth = Storage(user=user,
last_visit=current.request.now,
expiration=self.settings.expiration,
hmac_key=web2py_uuid())
self.user = user
self.update_groups()
def _get_login_settings(self):
table_user = self.table_user()
userfield = self.settings.login_userfield or 'username' \
if 'username' in table_user.fields else 'email'
passfield = self.settings.password_field
return Storage({"table_user": table_user,
"userfield": userfield,
"passfield": passfield})
def login_bare(self, username, password):
"""
        Logs in the user specified by username (or email) and password
"""
settings = self._get_login_settings()
        user = settings.table_user(**{settings.userfield: username})
if user and user.get(settings.passfield, False):
password = settings.table_user[
settings.passfield].validate(password)[0]
if ((user.registration_key is None or
not user.registration_key.strip()) and
password == user[settings.passfield]):
self.login_user(user)
return user
else:
# user not in database try other login methods
for login_method in self.settings.login_methods:
if login_method != self and login_method(username, password):
self.user = username
return username
return False
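    # Illustrative only -- programmatic login without the web form
    # (credentials are placeholders):
    #
    #     user = auth.login_bare('who@example.com', 'secret')
    #     if not user:
    #         pass  # wrong credentials, or registration pending/blocked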
def register_bare(self, **fields):
"""
Registers a user as specified by username (or email)
and a raw password.
"""
settings = self._get_login_settings()
if not fields.get(settings.passfield):
raise ValueError("register_bare: " +
"password not provided or invalid")
elif not fields.get(settings.userfield):
raise ValueError("register_bare: " +
"userfield not provided or invalid")
fields[settings.passfield] = settings.table_user[settings.passfield].validate(fields[settings.passfield])[0]
user = self.get_or_create_user(fields, login=False, get=False, update_fields=self.settings.update_fields)
if not user:
# get or create did not create a user (it ignores duplicate records)
return False
return user
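    # Illustrative only -- programmatic registration with a raw password
    # (the actual userfield may be 'username' instead of 'email'):
    #
    #     user = auth.register_bare(email='who@example.com',
    #                               password='secret')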
def cas_login(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
version=2,
):
request = current.request
response = current.response
session = current.session
db, table = self.db, self.table_cas()
session._cas_service = request.vars.service or session._cas_service
if not request.env.http_host in self.settings.cas_domains or \
not session._cas_service:
raise HTTP(403, 'not authorized')
def allow_access(interactivelogin=False):
row = table(service=session._cas_service, user_id=self.user.id)
if row:
ticket = row.ticket
else:
ticket = 'ST-' + web2py_uuid()
table.insert(service=session._cas_service,
user_id=self.user.id,
ticket=ticket,
created_on=request.now,
renew=interactivelogin)
service = session._cas_service
query_sep = '&' if '?' in service else '?'
del session._cas_service
if 'warn' in request.vars and not interactivelogin:
response.headers[
'refresh'] = "5;URL=%s" % service + query_sep + "ticket=" + ticket
return A("Continue to %s" % service,
_href=service + query_sep + "ticket=" + ticket)
else:
redirect(service + query_sep + "ticket=" + ticket)
if self.is_logged_in() and not 'renew' in request.vars:
return allow_access()
elif not self.is_logged_in() and 'gateway' in request.vars:
redirect(service)
def cas_onaccept(form, onaccept=onaccept):
            if onaccept is not DEFAULT:
onaccept(form)
return allow_access(interactivelogin=True)
return self.login(next, onvalidation, cas_onaccept, log)
def cas_validate(self, version=2, proxy=False):
request = current.request
db, table = self.db, self.table_cas()
current.response.headers['Content-Type'] = 'text'
ticket = request.vars.ticket
renew = 'renew' in request.vars
row = table(ticket=ticket)
success = False
if row:
userfield = self.settings.login_userfield or 'username' \
if 'username' in table.fields else 'email'
            # if the ticket is a Service Ticket and the RENEW flag is respected
if ticket[0:3] == 'ST-' and \
not ((row.renew and renew) ^ renew):
user = self.table_user()(row.user_id)
row.delete_record()
success = True
def build_response(body):
return '<?xml version="1.0" encoding="UTF-8"?>\n' +\
TAG['cas:serviceResponse'](
body, **{'_xmlns:cas': 'http://www.yale.edu/tp/cas'}).xml()
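        # Illustrative only -- a successful version-2 response built below
        # looks roughly like:
        #
        #     <?xml version="1.0" encoding="UTF-8"?>
        #     <cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
        #       <cas:authenticationSuccess>
        #         <cas:user>jdoe</cas:user>
        #         ...
        #       </cas:authenticationSuccess>
        #     </cas:serviceResponse>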
if success:
if version == 1:
message = 'yes\n%s' % user[userfield]
else: # assume version 2
username = user.get('username', user[userfield])
message = build_response(
TAG['cas:authenticationSuccess'](
TAG['cas:user'](username),
*[TAG['cas:' + field.name](user[field.name])
for field in self.table_user()
if field.readable]))
else:
if version == 1:
message = 'no\n'
elif row:
message = build_response(TAG['cas:authenticationFailure']())
else:
message = build_response(
TAG['cas:authenticationFailure'](
'Ticket %s not recognized' % ticket,
_code='INVALID TICKET'))
raise HTTP(200, message)
def _reset_two_factor_auth(self, session):
"""When two-step authentication is enabled, this function is used to
clear the session after successfully completing second challenge
or when the maximum number of tries allowed has expired.
"""
session.auth_two_factor_user = None
session.auth_two_factor = None
session.auth_two_factor_enabled = False
# Allow up to 4 attempts (the 1st one plus 3 more)
session.auth_two_factor_tries_left = 3
def login(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a login form
"""
table_user = self.table_user()
settings = self.settings
if 'username' in table_user.fields or \
not settings.login_email_validate:
tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty)
if not settings.username_case_sensitive:
tmpvalidator = [IS_LOWER(), tmpvalidator]
else:
tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email)
if not settings.email_case_sensitive:
tmpvalidator = [IS_LOWER(), tmpvalidator]
request = current.request
response = current.response
session = current.session
passfield = settings.password_field
try:
table_user[passfield].requires[-1].min_length = 0
except:
pass
### use session for federated login
snext = self.get_vars_next()
if snext and self.settings.prevent_open_redirect_attacks:
items = snext.split('/')
if '//' in snext and items[2] != request.env.http_host:
snext = None
if snext:
session._auth_next = snext
elif session._auth_next:
snext = session._auth_next
### pass
if next is DEFAULT:
# important for security
next = settings.login_next
if callable(next):
next = next()
user_next = snext
if user_next:
external = user_next.split('://')
if external[0].lower() in ['http', 'https', 'ftp']:
host_next = user_next.split('//', 1)[-1].split('/')[0]
if host_next in settings.cas_domains:
next = user_next
else:
next = user_next
if onvalidation is DEFAULT:
onvalidation = settings.login_onvalidation
if onaccept is DEFAULT:
onaccept = settings.login_onaccept
if log is DEFAULT:
log = self.messages['login_log']
onfail = settings.login_onfail
user = None # default
        # Set up the default field used for the form
multi_login = False
if self.settings.login_userfield:
username = self.settings.login_userfield
else:
if 'username' in table_user.fields:
username = 'username'
else:
username = 'email'
if self.settings.multi_login:
multi_login = True
old_requires = table_user[username].requires
table_user[username].requires = tmpvalidator
# If two-factor authentication is enabled, and the maximum
# number of tries allowed is used up, reset the session to
# pre-login state with two-factor auth
if session.auth_two_factor_enabled and session.auth_two_factor_tries_left < 1:
# Exceeded maximum allowed tries for this code. Require user to enter
# username and password again.
user = None
accepted_form = False
self._reset_two_factor_auth(session)
# Redirect to the default 'next' page without logging
# in. If that page requires login, user will be redirected
# back to the main login form
redirect(next, client_side=settings.client_side)
# Before showing the default login form, check whether
# we are already on the second step of two-step authentication.
# If we are, then skip this login form and use the form for the
# second challenge instead.
# Note to devs: The code inside the if-block is unchanged from the
# previous version of this file, other than for indentation inside
# to put it inside the if-block
if session.auth_two_factor_user is None:
if settings.remember_me_form:
extra_fields = [
Field('remember_me', 'boolean', default=False,
label = self.messages.label_remember_me)]
else:
extra_fields = []
# do we use our own login form, or from a central source?
if settings.login_form == self:
form = SQLFORM(
table_user,
fields=[username, passfield],
hidden=dict(_next=next),
showid=settings.showid,
submit_button=self.messages.login_button,
delete_label=self.messages.delete_label,
formstyle=settings.formstyle,
separator=settings.label_separator,
extra_fields = extra_fields,
)
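                # note: settings.login_captcha=False disables the captcha
                # below even when a global settings.captcha is configured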
captcha = settings.login_captcha or \
(settings.login_captcha != False and settings.captcha)
if captcha:
addrow(form, captcha.label, captcha, captcha.comment,
settings.formstyle, 'captcha__row')
accepted_form = False
if form.accepts(request, session if self.csrf_prevention else None,
formname='login', dbio=False,
onvalidation=onvalidation,
hideerror=settings.hideerror):
accepted_form = True
# check for username in db
entered_username = form.vars[username]
if multi_login and '@' in entered_username:
# if '@' in username check for email, not username
user = table_user(email = entered_username)
else:
user = table_user(**{username: entered_username})
if user:
# user in db, check if registration pending or disabled
temp_user = user
if temp_user.registration_key == 'pending':
response.flash = self.messages.registration_pending
return form
elif temp_user.registration_key in ('disabled', 'blocked'):
response.flash = self.messages.login_disabled
return form
                        elif (temp_user.registration_key is not None
                              and temp_user.registration_key.strip()):
response.flash = \
self.messages.registration_verifying
return form
# try alternate logins 1st as these have the
# current version of the password
user = None
for login_method in settings.login_methods:
if login_method != self and \
login_method(request.vars[username],
request.vars[passfield]):
if not self in settings.login_methods:
# do not store password in db
form.vars[passfield] = None
user = self.get_or_create_user(
form.vars, settings.update_fields)
break
if not user:
# alternates have failed, maybe because service inaccessible
if settings.login_methods[0] == self:
# try logging in locally using cached credentials
if form.vars.get(passfield, '') == temp_user[passfield]:
# success
user = temp_user
else:
# user not in db
if not settings.alternate_requires_registration:
# we're allowed to auto-register users from external systems
for login_method in settings.login_methods:
if login_method != self and \
login_method(request.vars[username],
request.vars[passfield]):
if not self in settings.login_methods:
# do not store password in db
form.vars[passfield] = None
user = self.get_or_create_user(
form.vars, settings.update_fields)
break
if not user:
self.log_event(self.messages['login_failed_log'],
request.post_vars)
# invalid login
session.flash = self.messages.invalid_login
callback(onfail, None)
redirect(
self.url(args=request.args, vars=request.get_vars),
client_side=settings.client_side)
else: # use a central authentication server
cas = settings.login_form
cas_user = cas.get_user()
if cas_user:
cas_user[passfield] = None
user = self.get_or_create_user(
table_user._filter_fields(cas_user),
settings.update_fields)
elif hasattr(cas, 'login_form'):
return cas.login_form()
else:
# we need to pass through login again before going on
next = self.url(settings.function, args='login')
redirect(cas.login_url(next),
client_side=settings.client_side)
# Extra login logic for two-factor authentication
#################################################
# If the 'user' variable has a value, this means that the first
# authentication step was successful (i.e. user provided correct
# username and password at the first challenge).
# Check if this user is signed up for two-factor authentication
# Default rule is that the user must be part of a group that is called
# auth.settings.two_factor_authentication_group
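# Example (a sketch): to enable two-factor auth for a set of users,
# create a group and point this setting at its role, e.g.:
#     auth.settings.two_factor_authentication_group = 'two_factor'
#     auth.add_membership(auth.id_group('two_factor'), some_user_id)
# ('two_factor' and some_user_id are illustrative; the group must exist)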
if user and self.settings.two_factor_authentication_group:
role = self.settings.two_factor_authentication_group
session.auth_two_factor_enabled = self.has_membership(user_id=user.id, role=role)
# challenge
if session.auth_two_factor_enabled:
form = SQLFORM.factory(
Field('authentication_code',
required=True,
comment='This code was emailed to you and is required for login.'),
hidden=dict(_next=next),
formstyle=settings.formstyle,
separator=settings.label_separator
)
# accepted_form is used by some default web2py code later in the
# function that handles running specified functions before redirect
# Set it to False until the challenge form is accepted.
accepted_form = False
# Handle the case when a user has submitted the login/password
# form successfully, and the password has been validated, but
# the two-factor form has not been displayed or validated yet.
if session.auth_two_factor_user is None and user is not None:
session.auth_two_factor_user = user # store the validated user and associate with this session
session.auth_two_factor = random.randint(100000, 999999)
session.auth_two_factor_tries_left = 3 # Allow user to try up to 3 times
# TODO: Add some error checking to handle cases where email cannot be sent
self.settings.mailer.send(
to=user.email,
subject="Two-step Login Authentication Code",
message="Your temporary login code is {0}".format(session.auth_two_factor))
if form.accepts(request, session if self.csrf_prevention else None,
formname='login', dbio=False,
onvalidation=onvalidation,
hideerror=settings.hideerror):
accepted_form = True
if form.vars['authentication_code'] == str(session.auth_two_factor):
# Handle the case when the two-factor form has been successfully
# validated and the user was previously stored (the current user
# should be None because, in this case, the previous
# username/password login form should not be displayed).
# This allows the code after the 2-factor authentication block
# to proceed as normal.
if user is None or user == session.auth_two_factor_user:
user = session.auth_two_factor_user
# For security, because the user stored in the session somehow
# does not match the just-validated user. This should not be
# possible without session stealing, which is hard with SSL.
elif user != session.auth_two_factor_user:
user = None
# Either way, the user and code associated with this session should
# be removed. This handles cases where the session login may have
# expired but the browser window is still open, so the old session
# key and session username would otherwise persist.
self._reset_two_factor_auth(session)
else:
# Decrement first so the message reports the attempts actually left.
session.auth_two_factor_tries_left -= 1
response.flash = 'Incorrect code. {0} more attempt(s) remaining.'.format(session.auth_two_factor_tries_left)
return form
else:
return form
# End login logic for two-factor authentication
# process authenticated users
if user:
user = Row(table_user._filter_fields(user, id=True))
# process authenticated users
# user wants to be logged in for longer
self.login_user(user)
session.auth.expiration = \
request.post_vars.remember_me and \
settings.long_expiration or \
settings.expiration
session.auth.remember_me = 'remember_me' in request.post_vars
self.log_event(log, user)
session.flash = self.messages.logged_in
# how to continue
if settings.login_form == self:
if accepted_form:
callback(onaccept, form)
if next == session._auth_next:
session._auth_next = None
next = replace_id(next, form)
redirect(next, client_side=settings.client_side)
table_user[username].requires = old_requires
return form
elif user:
callback(onaccept, None)
if next == session._auth_next:
del session._auth_next
redirect(next, client_side=settings.client_side)
def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT):
"""
Logs out and redirects to login
"""
# Clear out 2-step authentication information if user logs
# out. This information is also cleared on successful login.
self._reset_two_factor_auth(current.session)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.logout_next
if onlogout is DEFAULT:
onlogout = self.settings.logout_onlogout
if onlogout:
onlogout(self.user)
if log is DEFAULT:
log = self.messages['logout_log']
if self.user:
self.log_event(log, self.user)
if self.settings.login_form != self:
cas = self.settings.login_form
cas_user = cas.get_user()
if cas_user:
next = cas.logout_url(next)
current.session.auth = None
if self.settings.renew_session_onlogout:
current.session.renew(clear_session=not self.settings.keep_session_onlogout)
current.session.flash = self.messages.logged_out
if not next is None:
redirect(next)
def register(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a registration form
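Example:
    Typical use in a controller action (a sketch; assumes the usual
    `auth = Auth(db)` instance from a scaffolding application)::

        def register():
            return dict(form=auth.register())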
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
if self.is_logged_in():
redirect(self.settings.logged_url,
client_side=self.settings.client_side)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.register_next
if onvalidation is DEFAULT:
onvalidation = self.settings.register_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.register_onaccept
if log is DEFAULT:
log = self.messages['register_log']
if self.settings.login_userfield:
username = self.settings.login_userfield
elif 'username' in table_user.fields:
username = 'username'
else:
username = 'email'
# Ensure the username field is unique.
unique_validator = IS_NOT_IN_DB(self.db, table_user[username])
if not table_user[username].requires:
table_user[username].requires = unique_validator
elif isinstance(table_user[username].requires, (list, tuple)):
if not any([isinstance(validator, IS_NOT_IN_DB) for validator in
table_user[username].requires]):
if isinstance(table_user[username].requires, list):
table_user[username].requires.append(unique_validator)
else:
table_user[username].requires += (unique_validator, )
elif not isinstance(table_user[username].requires, IS_NOT_IN_DB):
table_user[username].requires = [table_user[username].requires,
unique_validator]
passfield = self.settings.password_field
formstyle = self.settings.formstyle
if self.settings.register_verify_password:
extra_fields = [
Field("password_two", "password", requires=IS_EQUAL_TO(
request.post_vars.get(passfield, None),
error_message=self.messages.mismatched_password),
label=current.T("Confirm Password"))]
else:
extra_fields = []
form = SQLFORM(table_user,
fields=self.settings.register_fields,
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.register_button,
delete_label=self.messages.delete_label,
formstyle=formstyle,
separator=self.settings.label_separator,
extra_fields = extra_fields
)
captcha = self.settings.register_captcha or self.settings.captcha
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
# Add a message if specified
if self.settings.pre_registration_div:
addrow(form, '',
DIV(_id="pre-reg", *self.settings.pre_registration_div),
'', formstyle, '')
table_user.registration_key.default = key = web2py_uuid()
if form.accepts(request, session if self.csrf_prevention else None,
formname='register',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
description = self.messages.group_description % form.vars
if self.settings.create_user_groups:
group_id = self.add_group(
self.settings.create_user_groups % form.vars, description)
self.add_membership(group_id, form.vars.id)
if self.settings.everybody_group_id:
self.add_membership(
self.settings.everybody_group_id, form.vars.id)
if self.settings.registration_requires_verification:
link = self.url(
self.settings.function, args=('verify_email', key), scheme=True)
d = dict(form.vars)
d.update(dict(key=key, link=link, username=form.vars[username]))
if not (self.settings.mailer and self.settings.mailer.send(
to=form.vars.email,
subject=self.messages.verify_email_subject,
message=self.messages.verify_email % d)):
self.db.rollback()
response.flash = self.messages.unable_send_email
return form
session.flash = self.messages.email_sent
if self.settings.registration_requires_approval and \
not self.settings.registration_requires_verification:
table_user[form.vars.id] = dict(registration_key='pending')
session.flash = self.messages.registration_pending
elif (not self.settings.registration_requires_verification or
self.settings.login_after_registration):
if not self.settings.registration_requires_verification:
table_user[form.vars.id] = dict(registration_key='')
session.flash = self.messages.registration_successful
user = table_user(**{username: form.vars[username]})
self.login_user(user)
session.flash = self.messages.logged_in
self.log_event(log, form.vars)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def is_logged_in(self):
"""
Checks if the user is logged in and returns True/False.
If so, the user is available in auth.user as well as in session.auth.user
"""
if self.user:
return True
return False
def verify_email(self,
next=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Action used to verify the registration email
"""
key = getarg(-1)
table_user = self.table_user()
user = table_user(registration_key=key)
if not user:
redirect(self.settings.login_url)
if self.settings.registration_requires_approval:
user.update_record(registration_key='pending')
current.session.flash = self.messages.registration_pending
else:
user.update_record(registration_key='')
current.session.flash = self.messages.email_verified
# make sure session has same user.registration_key as db record
if current.session.auth and current.session.auth.user:
current.session.auth.user.registration_key = user.registration_key
if log is DEFAULT:
log = self.messages['verify_email_log']
if next is DEFAULT:
next = self.settings.verify_email_next
if onaccept is DEFAULT:
onaccept = self.settings.verify_email_onaccept
self.log_event(log, user)
callback(onaccept, user)
redirect(next)
def retrieve_username(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to retrieve the user username
(only if there is a username field)
"""
table_user = self.table_user()
if not 'username' in table_user.fields:
raise HTTP(404)
request = current.request
response = current.response
session = current.session
captcha = self.settings.retrieve_username_captcha or \
(self.settings.retrieve_username_captcha != False and self.settings.captcha)
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if next is DEFAULT:
next = self.get_vars_next() or self.settings.retrieve_username_next
if onvalidation is DEFAULT:
onvalidation = self.settings.retrieve_username_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.retrieve_username_onaccept
if log is DEFAULT:
log = self.messages['retrieve_username_log']
old_requires = table_user.email.requires
table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
form = SQLFORM(table_user,
fields=['email'],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
if form.accepts(request, session if self.csrf_prevention else None,
formname='retrieve_username', dbio=False,
onvalidation=onvalidation, hideerror=self.settings.hideerror):
users = table_user._db(table_user.email==form.vars.email).select()
if not users:
current.session.flash = \
self.messages.invalid_email
redirect(self.url(args=request.args))
username = ', '.join(u.username for u in users)
self.settings.mailer.send(to=form.vars.email,
subject=self.messages.retrieve_username_subject,
message=self.messages.retrieve_username % dict(username=username))
session.flash = self.messages.email_sent
for user in users:
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next)
table_user.email.requires = old_requires
return form
def random_password(self):
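"""
Returns a random 12-character password built from lowercase and
uppercase letters, digits and the specials '!#$*', then shuffled.
"""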
import string
import random
password = ''
specials = r'!#$*'
for i in range(0, 3):
password += random.choice(string.lowercase)
password += random.choice(string.uppercase)
password += random.choice(string.digits)
password += random.choice(specials)
return ''.join(random.sample(password, len(password)))
def reset_password_deprecated(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to reset the user password (deprecated)
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if next is DEFAULT:
next = self.get_vars_next() or self.settings.retrieve_password_next
if onvalidation is DEFAULT:
onvalidation = self.settings.retrieve_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.retrieve_password_onaccept
if log is DEFAULT:
log = self.messages['retrieve_password_log']
old_requires = table_user.email.requires
table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
form = SQLFORM(table_user,
fields=['email'],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session if self.csrf_prevention else None,
formname='retrieve_password', dbio=False,
onvalidation=onvalidation, hideerror=self.settings.hideerror):
user = table_user(email=form.vars.email)
if not user:
current.session.flash = \
self.messages.invalid_email
redirect(self.url(args=request.args))
elif user.registration_key in ('pending', 'disabled', 'blocked'):
current.session.flash = \
self.messages.registration_pending
redirect(self.url(args=request.args))
password = self.random_password()
passfield = self.settings.password_field
d = {
passfield: str(table_user[passfield].validate(password)[0]),
'registration_key': ''
}
user.update_record(**d)
if self.settings.mailer and \
self.settings.mailer.send(to=form.vars.email,
subject=self.messages.retrieve_password_subject,
message=self.messages.retrieve_password % dict(password=password)):
session.flash = self.messages.email_sent
else:
session.flash = self.messages.unable_to_send_email
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next)
table_user.email.requires = old_requires
return form
def reset_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to reset the user password
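Example:
    Normally reached via the link emailed by request_reset_password,
    which carries the `key` variable; a controller sketch (assumes
    the usual `auth` instance)::

        def reset_password():
            return dict(form=auth.reset_password())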
"""
table_user = self.table_user()
request = current.request
# response = current.response
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.reset_password_next
if self.settings.prevent_password_reset_attacks:
key = request.vars.key
if key:
session._reset_password_key = key
redirect(self.url(args='reset_password'))
else:
key = session._reset_password_key
else:
key = request.vars.key
try:
t0 = int(key.split('-')[0])
if time.time() - t0 > 60 * 60 * 24:
raise Exception
user = table_user(reset_password_key=key)
if not user:
raise Exception
except Exception:
session.flash = self.messages.invalid_reset_password
redirect(next, client_side=self.settings.client_side)
passfield = self.settings.password_field
form = SQLFORM.factory(
Field('new_password', 'password',
label=self.messages.new_password,
requires=self.table_user()[passfield].requires),
Field('new_password2', 'password',
label=self.messages.verify_password,
requires=[IS_EXPR(
'value==%s' % repr(request.vars.new_password),
self.messages.mismatched_password)]),
submit_button=self.messages.password_reset_button,
hidden=dict(_next=next),
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session,
hideerror=self.settings.hideerror):
user.update_record(
**{passfield: str(form.vars.new_password),
'registration_key': '',
'reset_password_key': ''})
session.flash = self.messages.password_changed
if self.settings.login_after_password_change:
self.login_user(user)
redirect(next, client_side=self.settings.client_side)
return form
def request_reset_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to request a password reset (emails the user a reset link)
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
captcha = self.settings.retrieve_password_captcha or \
(self.settings.retrieve_password_captcha != False and self.settings.captcha)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.request_reset_password_next
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if onvalidation is DEFAULT:
onvalidation = self.settings.reset_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.reset_password_onaccept
if log is DEFAULT:
log = self.messages['reset_password_log']
userfield = self.settings.login_userfield or (
    'username' if 'username' in table_user.fields else 'email')
if userfield == 'email':
table_user.email.requires = [
IS_EMAIL(error_message=self.messages.invalid_email),
IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
if not self.settings.email_case_sensitive:
table_user.email.requires.insert(0, IS_LOWER())
else:
table_user.username.requires = [
IS_IN_DB(self.db, table_user.username,
error_message=self.messages.invalid_username)]
if not self.settings.username_case_sensitive:
table_user.username.requires.insert(0, IS_LOWER())
form = SQLFORM(table_user,
fields=[userfield],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.password_reset_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
if form.accepts(request, session if self.csrf_prevention else None,
formname='reset_password', dbio=False,
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
user = table_user(**{userfield:form.vars.get(userfield)})
if not user:
session.flash = self.messages['invalid_%s' % userfield]
redirect(self.url(args=request.args),
client_side=self.settings.client_side)
elif user.registration_key in ('pending', 'disabled', 'blocked'):
session.flash = self.messages.registration_pending
redirect(self.url(args=request.args),
client_side=self.settings.client_side)
if self.email_reset_password(user):
session.flash = self.messages.email_sent
else:
session.flash = self.messages.unable_to_send_email
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
# old_requires = table_user.email.requires
return form
def email_reset_password(self, user):
reset_password_key = str(int(time.time())) + '-' + web2py_uuid()
link = self.url(self.settings.function,
args=('reset_password',), vars={'key': reset_password_key},
scheme=True)
d = dict(user)
d.update(dict(key=reset_password_key, link=link))
if self.settings.mailer and self.settings.mailer.send(
to=user.email,
subject=self.messages.reset_password_subject,
message=self.messages.reset_password % d):
user.update_record(reset_password_key=reset_password_key)
return True
return False
def retrieve_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
if self.settings.reset_password_requires_verification:
return self.request_reset_password(next, onvalidation, onaccept, log)
else:
return self.reset_password_deprecated(next, onvalidation, onaccept, log)
def change_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form that lets the user change password
"""
if not self.is_logged_in():
redirect(self.settings.login_url,
client_side=self.settings.client_side)
db = self.db
table_user = self.table_user()
s = db(table_user.id == self.user.id)
request = current.request
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.change_password_next
if onvalidation is DEFAULT:
onvalidation = self.settings.change_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.change_password_onaccept
if log is DEFAULT:
log = self.messages['change_password_log']
passfield = self.settings.password_field
requires = table_user[passfield].requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
requires = filter(lambda t: isinstance(t, CRYPT), requires)
if requires:
requires[0].min_length = 0
form = SQLFORM.factory(
Field('old_password', 'password', requires=requires,
label=self.messages.old_password),
Field('new_password', 'password',
label=self.messages.new_password,
requires=table_user[passfield].requires),
Field('new_password2', 'password',
label=self.messages.verify_password,
requires=[IS_EXPR(
'value==%s' % repr(request.vars.new_password),
self.messages.mismatched_password)]),
submit_button=self.messages.password_change_button,
hidden=dict(_next=next),
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session,
formname='change_password',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
current_user = s.select(limitby=(0, 1), orderby_on_limitby=False).first()
if not form.vars['old_password'] == current_user[passfield]:
form.errors['old_password'] = self.messages.invalid_password
else:
d = {passfield: str(form.vars.new_password)}
s.update(**d)
session.flash = self.messages.password_changed
self.log_event(log, self.user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def profile(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form that lets the user change his/her profile
"""
table_user = self.table_user()
if not self.is_logged_in():
redirect(self.settings.login_url,
client_side=self.settings.client_side)
passfield = self.settings.password_field
table_user[passfield].writable = False
request = current.request
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.profile_next
if onvalidation is DEFAULT:
onvalidation = self.settings.profile_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.profile_onaccept
if log is DEFAULT:
log = self.messages['profile_log']
form = SQLFORM(
table_user,
self.user.id,
fields=self.settings.profile_fields,
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.profile_save_button,
delete_label=self.messages.delete_label,
upload=self.settings.download_url,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator,
deletable=self.settings.allow_delete_accounts,
)
if form.accepts(request, session,
formname='profile',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
self.user.update(table_user._filter_fields(form.vars))
session.flash = self.messages.profile_updated
self.log_event(log, self.user)
callback(onaccept, form)
if form.deleted:
return self.logout()
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def run_login_onaccept(self):
onaccept = self.settings.login_onaccept
if onaccept:
form = Storage(dict(vars=self.user))
if not isinstance(onaccept, (list, tuple)):
onaccept = [onaccept]
for callback in onaccept:
callback(form)
def is_impersonating(self):
return self.is_logged_in() and 'impersonator' in current.session.auth
def impersonate(self, user_id=DEFAULT):
"""
To use this, make a POST to `http://..../impersonate`
with `request.post_vars.user_id=<id>`.
Set request.post_vars.user_id to 0 to restore the original user.
Requires that the impersonator is logged in and::
has_permission('impersonate', 'auth_user', user_id)
"""
request = current.request
session = current.session
auth = session.auth
table_user = self.table_user()
if not self.is_logged_in():
raise HTTP(401, "Not Authorized")
current_id = auth.user.id
requested_id = user_id
if user_id is DEFAULT:
user_id = current.request.post_vars.user_id
if user_id and user_id != self.user.id and user_id != '0':
if not self.has_permission('impersonate',
self.table_user(),
user_id):
raise HTTP(403, "Forbidden")
user = table_user(user_id)
if not user:
raise HTTP(401, "Not Authorized")
auth.impersonator = pickle.dumps(session, pickle.HIGHEST_PROTOCOL)
auth.user.update(
table_user._filter_fields(user, True))
self.user = auth.user
self.update_groups()
log = self.messages['impersonate_log']
self.log_event(log, dict(id=current_id, other_id=auth.user.id))
self.run_login_onaccept()
elif user_id in (0, '0'):
if self.is_impersonating():
session.clear()
session.update(pickle.loads(auth.impersonator))
self.user = session.auth.user
self.update_groups()
self.run_login_onaccept()
return None
if requested_id is DEFAULT and not request.post_vars:
return SQLFORM.factory(Field('user_id', 'integer'))
return SQLFORM(table_user, user.id, readonly=True)
def update_groups(self):
if not self.user:
return
user_groups = self.user_groups = {}
if current.session.auth:
current.session.auth.user_groups = self.user_groups
table_group = self.table_group()
table_membership = self.table_membership()
memberships = self.db(
table_membership.user_id == self.user.id).select()
for membership in memberships:
group = table_group(membership.group_id)
if group:
user_groups[membership.group_id] = group.role
def groups(self):
"""
Displays the groups and their roles for the logged in user
"""
if not self.is_logged_in():
redirect(self.settings.login_url)
table_membership = self.table_membership()
memberships = self.db(
table_membership.user_id == self.user.id).select()
table = TABLE()
for membership in memberships:
table_group = self.table_group()
groups = self.db(table_group.id == membership.group_id).select()
if groups:
group = groups[0]
table.append(TR(H3(group.role, '(%s)' % group.id)))
table.append(TR(P(group.description)))
if not memberships:
return None
return table
def not_authorized(self):
"""
You can change the view for this page to make it look as you like
"""
if current.request.ajax:
raise HTTP(403, 'ACCESS DENIED')
return self.messages.access_denied
def requires(self, condition, requires_login=True, otherwise=None):
"""
Decorator that prevents access to action if not logged in
"""
def decorator(action):
def f(*a, **b):
basic_allowed, basic_accepted, user = self.basic()
user = user or self.user
if requires_login:
if not user:
if current.request.ajax:
raise HTTP(401, self.messages.ajax_failed_authentication)
elif not otherwise is None:
if callable(otherwise):
return otherwise()
redirect(otherwise)
elif self.settings.allow_basic_login_only or \
basic_accepted or current.request.is_restful:
raise HTTP(403, "Not authorized")
else:
next = self.here()
current.session.flash = current.response.flash
return call_or_redirect(
self.settings.on_failed_authentication,
self.settings.login_url +
'?_next=' + urllib.quote(next))
if callable(condition):
flag = condition()
else:
flag = condition
if not flag:
current.session.flash = self.messages.access_denied
return call_or_redirect(
self.settings.on_failed_authorization)
return action(*a, **b)
f.__doc__ = action.__doc__
f.__name__ = action.__name__
f.__dict__.update(action.__dict__)
return f
return decorator
def requires_login(self, otherwise=None):
"""
Decorator that prevents access to action if not logged in
"""
return self.requires(True, otherwise=otherwise)
def requires_membership(self, role=None, group_id=None, otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of group_id.
If role is provided instead of group_id then the
group_id is calculated.
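Example:
    A sketch (assumes an 'editors' group exists)::

        @auth.requires_membership('editors')
        def edit_page():
            return dict()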
"""
def has_membership(self=self, group_id=group_id, role=role):
return self.has_membership(group_id=group_id, role=role)
return self.requires(has_membership, otherwise=otherwise)
def requires_permission(self, name, table_name='', record_id=0,
otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of any group (role) that
has 'name' access to 'table_name', 'record_id'.
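Example:
    A sketch (assumes the permission was granted with add_permission)::

        @auth.requires_permission('update', 'mytable')
        def update_mytable():
            return dict()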
"""
def has_permission(self=self, name=name, table_name=table_name, record_id=record_id):
return self.has_permission(name, table_name, record_id)
return self.requires(has_permission, otherwise=otherwise)
def requires_signature(self, otherwise=None, hash_vars=True):
"""
Decorator that prevents access to action unless the request URL
carries a valid signature (verified via URL.verify;
hash_vars controls which variables are hashed).
"""
def verify():
return URL.verify(current.request, user_signature=True, hash_vars=hash_vars)
return self.requires(verify, otherwise)
def add_group(self, role, description=''):
"""
Creates a group associated to a role
"""
group_id = self.table_group().insert(
role=role, description=description)
self.log_event(self.messages['add_group_log'],
dict(group_id=group_id, role=role))
return group_id
def del_group(self, group_id):
"""
Deletes a group
"""
self.db(self.table_group().id == group_id).delete()
self.db(self.table_membership().group_id == group_id).delete()
self.db(self.table_permission().group_id == group_id).delete()
if group_id in self.user_groups: del self.user_groups[group_id]
self.log_event(self.messages.del_group_log, dict(group_id=group_id))
def id_group(self, role):
"""
Returns the group_id of the group specified by the role
"""
rows = self.db(self.table_group().role == role).select()
if not rows:
return None
return rows[0].id
def user_group(self, user_id=None):
"""
Returns the group_id of the group uniquely associated to this user
i.e. `role=user:[user_id]`
"""
return self.id_group(self.user_group_role(user_id))
def user_group_role(self, user_id=None):
if not self.settings.create_user_groups:
return None
if user_id:
user = self.table_user()[user_id]
else:
user = self.user
return self.settings.create_user_groups % user
def has_membership(self, group_id=None, user_id=None, role=None):
"""
Checks if user is member of group_id or role
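Example:
    A sketch (assumes an 'editors' group exists)::

        if auth.has_membership(role='editors'):
            pass  # current user is an editor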
"""
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id) # interpret group_id as a role
if not user_id and self.user:
user_id = self.user.id
membership = self.table_membership()
if group_id and user_id and self.db((membership.user_id == user_id)
& (membership.group_id == group_id)).select():
r = True
else:
r = False
self.log_event(self.messages['has_membership_log'],
dict(user_id=user_id, group_id=group_id, check=r))
return r
def add_membership(self, group_id=None, user_id=None, role=None):
"""
Gives user_id membership of group_id or role.
If user_id is None then it defaults to the currently logged in user.
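Example:
    A sketch (assumes an 'editors' group exists)::

        auth.add_membership(auth.id_group('editors'), user_id)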
"""
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id) # interpret group_id as a role
if not user_id and self.user:
user_id = self.user.id
membership = self.table_membership()
record = membership(user_id=user_id, group_id=group_id)
if record:
return record.id
else:
id = membership.insert(group_id=group_id, user_id=user_id)
if role:
self.user_groups[group_id] = role
else:
self.update_groups()
self.log_event(self.messages['add_membership_log'],
dict(user_id=user_id, group_id=group_id))
return id
def del_membership(self, group_id=None, user_id=None, role=None):
"""
Revokes user_id's membership of group_id.
If user_id is None then it defaults to the currently logged in user.
"""
group_id = group_id or self.id_group(role)
if not user_id and self.user:
user_id = self.user.id
membership = self.table_membership()
self.log_event(self.messages['del_membership_log'],
dict(user_id=user_id, group_id=group_id))
ret = self.db(membership.user_id
== user_id)(membership.group_id
== group_id).delete()
if group_id in self.user_groups: del self.user_groups[group_id]
return ret
def has_permission(self,
name='any',
table_name='',
record_id=0,
user_id=None,
group_id=None,
):
"""
Checks if user_id or current logged in user is member of a group
that has 'name' permission on 'table_name' and 'record_id'
if group_id is passed, it checks whether the group has the permission
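Example:
    A sketch (assumes db.mytable and a previously granted permission)::

        if auth.has_permission('update', db.mytable, record_id):
            pass  # user may update this record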
"""
if not group_id and self.settings.everybody_group_id and \
self.has_permission(
name, table_name, record_id, user_id=None,
group_id=self.settings.everybody_group_id):
return True
if not user_id and not group_id and self.user:
user_id = self.user.id
if user_id:
membership = self.table_membership()
rows = self.db(membership.user_id
== user_id).select(membership.group_id)
groups = set([row.group_id for row in rows])
if group_id and not group_id in groups:
return False
else:
groups = set([group_id])
permission = self.table_permission()
rows = self.db(permission.name == name)(permission.table_name
== str(table_name))(permission.record_id
== record_id).select(permission.group_id)
groups_required = set([row.group_id for row in rows])
if record_id:
rows = self.db(permission.name
== name)(permission.table_name
== str(table_name))(permission.record_id
== 0).select(permission.group_id)
groups_required = groups_required.union(set([row.group_id
for row in rows]))
if groups.intersection(groups_required):
r = True
else:
r = False
if user_id:
self.log_event(self.messages['has_permission_log'],
dict(user_id=user_id, name=name,
table_name=table_name, record_id=record_id))
return r
def add_permission(self,
group_id,
name='any',
table_name='',
record_id=0,
):
"""
Gives group_id 'name' access to 'table_name' and 'record_id'
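Example:
    A sketch (assumes an 'editors' group and db.mytable exist)::

        group_id = auth.id_group('editors')
        auth.add_permission(group_id, 'read', db.mytable)        # any record
        auth.add_permission(group_id, 'update', db.mytable, 10)  # record 10 only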
"""
permission = self.table_permission()
if group_id == 0:
group_id = self.user_group()
record = self.db(permission.group_id == group_id)(permission.name == name)(permission.table_name == str(table_name))(
permission.record_id == long(record_id)).select(limitby=(0, 1), orderby_on_limitby=False).first()
if record:
id = record.id
else:
id = permission.insert(group_id=group_id, name=name,
table_name=str(table_name),
record_id=long(record_id))
self.log_event(self.messages['add_permission_log'],
dict(permission_id=id, group_id=group_id,
name=name, table_name=table_name,
record_id=record_id))
return id
def del_permission(self,
group_id,
name='any',
table_name='',
record_id=0,
):
"""
Revokes group_id 'name' access to 'table_name' and 'record_id'
"""
permission = self.table_permission()
self.log_event(self.messages['del_permission_log'],
dict(group_id=group_id, name=name,
table_name=table_name, record_id=record_id))
return self.db(permission.group_id == group_id)(permission.name
== name)(permission.table_name
== str(table_name))(permission.record_id
== long(record_id)).delete()
def accessible_query(self, name, table, user_id=None):
"""
Returns a query with all accessible records for user_id or
the current logged in user
this method does not work on GAE because it uses JOIN and IN
Example:
Use as::
db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL)
"""
if not user_id:
user_id = self.user_id
db = self.db
if isinstance(table, str) and table in self.db.tables():
table = self.db[table]
elif isinstance(table, (Set, Query)):
# experimental: build a chained query for all tables
if isinstance(table, Set):
cquery = table.query
else:
cquery = table
tablenames = db._adapter.tables(cquery)
for tablename in tablenames:
cquery &= self.accessible_query(name, tablename,
user_id=user_id)
return cquery
if not isinstance(table, str) and\
self.has_permission(name, table, 0, user_id):
return table.id > 0
membership = self.table_membership()
permission = self.table_permission()
query = table.id.belongs(
db(membership.user_id == user_id)
(membership.group_id == permission.group_id)
(permission.name == name)
(permission.table_name == table)
._select(permission.record_id))
if self.settings.everybody_group_id:
query |= table.id.belongs(
db(permission.group_id == self.settings.everybody_group_id)
(permission.name == name)
(permission.table_name == table)
._select(permission.record_id))
return query
@staticmethod
def archive(form,
archive_table=None,
current_record='current_record',
archive_current=False,
fields=None):
"""
If you have a table (db.mytable) that needs full revision history you
can just do::
form=crud.update(db.mytable,myrecord,onaccept=auth.archive)
or::
form=SQLFORM(db.mytable,myrecord).process(onaccept=auth.archive)
crud.archive will define a new table "mytable_archive" and store
a copy of the current record (if archive_current=True)
or a copy of the previous record (if archive_current=False)
in the newly created table including a reference
to the current record.
The fields argument allows one to specify extra fields that need to be archived.
If you want to access such table you need to define it yourself
in a model::
db.define_table('mytable_archive',
Field('current_record',db.mytable),
db.mytable)
Notice that such a table includes all fields of db.mytable plus one: current_record.
crud.archive does not timestamp the stored record unless your original table
has fields like::
db.define_table(...,
Field('saved_on','datetime',
default=request.now,update=request.now,writable=False),
Field('saved_by',auth.user,
default=auth.user_id,update=auth.user_id,writable=False),
There is nothing special about these fields since they are filled in before
the record is archived.
If you want to change the archive table name and the name of the reference field
you can do, for example::
db.define_table('myhistory',
Field('parent_record',db.mytable),
db.mytable)
and use it as::
form=crud.update(db.mytable,myrecord,
onaccept=lambda form:crud.archive(form,
archive_table=db.myhistory,
current_record='parent_record'))
"""
if not archive_current and not form.record:
return None
table = form.table
if not archive_table:
archive_table_name = '%s_archive' % table
if not archive_table_name in table._db:
table._db.define_table(
archive_table_name,
Field(current_record, table),
*[field.clone(unique=False) for field in table])
archive_table = table._db[archive_table_name]
new_record = {current_record: form.vars.id}
for fieldname in archive_table.fields:
if not fieldname in ['id', current_record]:
if archive_current and fieldname in form.vars:
new_record[fieldname] = form.vars[fieldname]
elif form.record and fieldname in form.record:
new_record[fieldname] = form.record[fieldname]
if fields:
new_record.update(fields)
id = archive_table.insert(**new_record)
return id
def wiki(self,
slug=None,
env=None,
render='markmin',
manage_permissions=False,
force_prefix='',
restrict_search=False,
resolve=True,
extra=None,
menu_groups=None,
templates=None,
migrate=True,
controller=None,
function=None,
force_render=False,
groups=None):
if controller and function:
resolve = False
if not hasattr(self, '_wiki'):
self._wiki = Wiki(self, render=render,
manage_permissions=manage_permissions,
force_prefix=force_prefix,
restrict_search=restrict_search,
env=env, extra=extra or {},
menu_groups=menu_groups,
templates=templates,
migrate=migrate,
controller=controller,
function=function,
groups=groups)
else:
self._wiki.env.update(env or {})
# if resolve is set to True, process request as wiki call
# resolve=False allows initial setup without wiki redirection
wiki = None
if resolve:
if slug:
wiki = self._wiki.read(slug, force_render)
if isinstance(wiki, dict) and 'content' in wiki:
# We don't want to return a dict object, just the wiki
wiki = wiki['content']
else:
wiki = self._wiki()
if isinstance(wiki, basestring):
wiki = XML(wiki)
return wiki
def wikimenu(self):
"""To be used in menu.py for app wide wiki menus"""
if (hasattr(self, "_wiki") and
self._wiki.settings.controller and
self._wiki.settings.function):
self._wiki.automenu()
class Crud(object):
def url(self, f=None, args=None, vars=None):
"""
This should point to the controller that exposes
download and crud
"""
if args is None:
args = []
if vars is None:
vars = {}
return URL(c=self.settings.controller, f=f, args=args, vars=vars)
def __init__(self, environment, db=None, controller='default'):
self.db = db
if not db and environment and isinstance(environment, DAL):
self.db = environment
elif not db:
raise SyntaxError("must pass db as first or second argument")
self.environment = current
settings = self.settings = Settings()
settings.auth = None
settings.logger = None
settings.create_next = None
settings.update_next = None
settings.controller = controller
settings.delete_next = self.url()
settings.download_url = self.url('download')
settings.create_onvalidation = StorageList()
settings.update_onvalidation = StorageList()
settings.delete_onvalidation = StorageList()
settings.create_onaccept = StorageList()
settings.update_onaccept = StorageList()
settings.update_ondelete = StorageList()
settings.delete_onaccept = StorageList()
settings.update_deletable = True
settings.showid = False
settings.keepvalues = False
settings.create_captcha = None
settings.update_captcha = None
settings.captcha = None
settings.formstyle = 'table3cols'
settings.label_separator = ': '
settings.hideerror = False
settings.detect_record_change = True
settings.hmac_key = None
settings.lock_keys = True
messages = self.messages = Messages(current.T)
messages.submit_button = 'Submit'
messages.delete_label = 'Check to delete'
messages.record_created = 'Record Created'
messages.record_updated = 'Record Updated'
messages.record_deleted = 'Record Deleted'
messages.update_log = 'Record %(id)s updated'
messages.create_log = 'Record %(id)s created'
messages.read_log = 'Record %(id)s read'
messages.delete_log = 'Record %(id)s deleted'
messages.lock_keys = True
def __call__(self):
args = current.request.args
if len(args) < 1:
raise HTTP(404)
elif args[0] == 'tables':
return self.tables()
elif len(args) > 1 and not args(1) in self.db.tables:
raise HTTP(404)
table = self.db[args(1)]
if args[0] == 'create':
return self.create(table)
elif args[0] == 'select':
return self.select(table, linkto=self.url(args='read'))
elif args[0] == 'search':
form, rows = self.search(table, linkto=self.url(args='read'))
return DIV(form, SQLTABLE(rows))
elif args[0] == 'read':
return self.read(table, args(2))
elif args[0] == 'update':
return self.update(table, args(2))
elif args[0] == 'delete':
return self.delete(table, args(2))
else:
raise HTTP(404)
def log_event(self, message, vars):
if self.settings.logger:
self.settings.logger.log_event(message, vars, origin='crud')
def has_permission(self, name, table, record=0):
if not self.settings.auth:
return True
try:
record_id = record.id
except:
record_id = record
return self.settings.auth.has_permission(name, str(table), record_id)
def tables(self):
return TABLE(*[TR(A(name,
_href=self.url(args=('select', name))))
for name in self.db.tables])
@staticmethod
def archive(form, archive_table=None, current_record='current_record'):
return Auth.archive(form, archive_table=archive_table,
current_record=current_record)
def update(self,
table,
record,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
ondelete=DEFAULT,
log=DEFAULT,
message=DEFAULT,
deletable=DEFAULT,
formname=DEFAULT,
**attributes
):
if not (isinstance(table, Table) or table in self.db.tables) \
or (isinstance(record, str) and not str(record).isdigit()):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
try:
record_id = record.id
except:
record_id = record or 0
if record_id and not self.has_permission('update', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
if not record_id and not self.has_permission('create', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
request = current.request
response = current.response
session = current.session
if request.extension == 'json' and request.vars.json:
request.vars.update(json_parser.loads(request.vars.json))
if next is DEFAULT:
next = request.get_vars._next \
or request.post_vars._next \
or self.settings.update_next
if onvalidation is DEFAULT:
onvalidation = self.settings.update_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.update_onaccept
if ondelete is DEFAULT:
ondelete = self.settings.update_ondelete
if log is DEFAULT:
log = self.messages['update_log']
if deletable is DEFAULT:
deletable = self.settings.update_deletable
if message is DEFAULT:
message = self.messages.record_updated
if not 'hidden' in attributes:
attributes['hidden'] = {}
attributes['hidden']['_next'] = next
form = SQLFORM(
table,
record,
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
deletable=deletable,
upload=self.settings.download_url,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator,
**attributes # contains hidden
)
self.accepted = False
self.deleted = False
captcha = self.settings.update_captcha or self.settings.captcha
if record and captcha:
addrow(form, captcha.label, captcha, captcha.comment,
self.settings.formstyle, 'captcha__row')
captcha = self.settings.create_captcha or self.settings.captcha
if not record and captcha:
addrow(form, captcha.label, captcha, captcha.comment,
self.settings.formstyle, 'captcha__row')
if not request.extension in ('html', 'load'):
(_session, _formname) = (None, None)
else:
(_session, _formname) = (
session, '%s/%s' % (table._tablename, form.record_id))
if not formname is DEFAULT:
_formname = formname
keepvalues = self.settings.keepvalues
if request.vars.delete_this_record:
keepvalues = False
if isinstance(onvalidation, StorageList):
onvalidation = onvalidation.get(table._tablename, [])
if form.accepts(request, _session, formname=_formname,
onvalidation=onvalidation, keepvalues=keepvalues,
hideerror=self.settings.hideerror,
detect_record_change=self.settings.detect_record_change):
self.accepted = True
response.flash = message
if log:
self.log_event(log, form.vars)
if request.vars.delete_this_record:
self.deleted = True
message = self.messages.record_deleted
callback(ondelete, form, table._tablename)
response.flash = message
callback(onaccept, form, table._tablename)
if not request.extension in ('html', 'load'):
raise HTTP(200, 'RECORD CREATED/UPDATED')
if isinstance(next, (list, tuple)): # fix issue with 2.6
next = next[0]
if next: # Only redirect when explicit
next = replace_id(next, form)
session.flash = response.flash
redirect(next)
elif not request.extension in ('html', 'load'):
raise HTTP(401, serializers.json(dict(errors=form.errors)))
return form
def create(self,
table,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
message=DEFAULT,
formname=DEFAULT,
**attributes
):
if next is DEFAULT:
next = self.settings.create_next
if onvalidation is DEFAULT:
onvalidation = self.settings.create_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.create_onaccept
if log is DEFAULT:
log = self.messages['create_log']
if message is DEFAULT:
message = self.messages.record_created
return self.update(
table,
None,
next=next,
onvalidation=onvalidation,
onaccept=onaccept,
log=log,
message=message,
deletable=False,
formname=formname,
**attributes
)
def read(self, table, record):
if not (isinstance(table, Table) or table in self.db.tables) \
or (isinstance(record, str) and not str(record).isdigit()):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('read', table, record):
redirect(self.settings.auth.settings.on_failed_authorization)
form = SQLFORM(
table,
record,
readonly=True,
comments=False,
upload=self.settings.download_url,
showid=self.settings.showid,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if not current.request.extension in ('html', 'load'):
return table._filter_fields(form.record, id=True)
return form
def delete(self,
table,
record_id,
next=DEFAULT,
message=DEFAULT,
):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('delete', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
request = current.request
session = current.session
if next is DEFAULT:
next = request.get_vars._next \
or request.post_vars._next \
or self.settings.delete_next
if message is DEFAULT:
message = self.messages.record_deleted
record = table[record_id]
if record:
callback(self.settings.delete_onvalidation, record)
del table[record_id]
callback(self.settings.delete_onaccept, record, table._tablename)
session.flash = message
redirect(next)
def rows(
self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not self.has_permission('select', table):
redirect(self.settings.auth.settings.on_failed_authorization)
#if record_id and not self.has_permission('select', table):
# redirect(self.settings.auth.settings.on_failed_authorization)
if not isinstance(table, Table):
table = self.db[table]
if not query:
query = table.id > 0
if not fields:
fields = [field for field in table if field.readable]
else:
fields = [table[f] if isinstance(f, str) else f for f in fields]
rows = self.db(query).select(*fields, **dict(orderby=orderby,
limitby=limitby))
return rows
def select(self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
headers=None,
**attr
):
headers = headers or {}
rows = self.rows(table, query, fields, orderby, limitby)
if not rows:
return None # Nicer than an empty table.
if not 'upload' in attr:
attr['upload'] = self.url('download')
if not current.request.extension in ('html', 'load'):
return rows.as_list()
if not headers:
if isinstance(table, str):
table = self.db[table]
headers = dict((str(k), k.label) for k in table)
return SQLTABLE(rows, headers=headers, **attr)
def get_format(self, field):
rtable = field._db[field.type[10:]]
format = rtable.get('_format', None)
if format and isinstance(format, str):
return format[2:-2]
return field.name
def get_query(self, field, op, value, refsearch=False):
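"""
Builds the query for one search row: returns a DAL query for
(field, op, value), or, when refsearch=True (reference fields),
a row-filter function; returns None if no valid query results.
"""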
try:
if refsearch:
format = self.get_format(field)
if op == 'equals':
if not refsearch:
return field == value
else:
return lambda row: row[field.name][format] == value
elif op == 'not equal':
if not refsearch:
return field != value
else:
return lambda row: row[field.name][format] != value
elif op == 'greater than':
if not refsearch:
return field > value
else:
return lambda row: row[field.name][format] > value
elif op == 'less than':
if not refsearch:
return field < value
else:
return lambda row: row[field.name][format] < value
elif op == 'starts with':
if not refsearch:
return field.like(value + '%')
else:
return lambda row: str(row[field.name][format]).startswith(value)
elif op == 'ends with':
if not refsearch:
return field.like('%' + value)
else:
return lambda row: str(row[field.name][format]).endswith(value)
elif op == 'contains':
if not refsearch:
return field.like('%' + value + '%')
else:
return lambda row: value in row[field.name][format]
except:
return None
def search(self, *tables, **args):
"""
Creates a search form and its results for a table
Examples:
Use as::
form, results = crud.search(db.test,
queries = ['equals', 'not equal', 'contains'],
query_labels={'equals':'Equals',
'not equal':'Not equal'},
fields = ['id','children'],
field_labels = {
'id':'ID','children':'Children'},
zero='Please choose',
query = (db.test.id > 0)&(db.test.id != 3) )
"""
table = tables[0]
fields = args.get('fields', table.fields)
validate = args.get('validate', True)
request = current.request
db = self.db
if not (isinstance(table, Table) or table in db.tables):
raise HTTP(404)
attributes = {}
for key in ('orderby', 'groupby', 'left', 'distinct', 'limitby', 'cache'):
if key in args:
attributes[key] = args[key]
tbl = TABLE()
selected = []
refsearch = []
results = []
showall = args.get('showall', False)
if showall:
selected = fields
chkall = args.get('chkall', False)
if chkall:
for f in fields:
request.vars['chk%s' % f] = 'on'
ops = args.get('queries', [])
zero = args.get('zero', '')
if not ops:
ops = ['equals', 'not equal', 'greater than',
'less than', 'starts with',
'ends with', 'contains']
ops.insert(0, zero)
query_labels = args.get('query_labels', {})
query = args.get('query', table.id > 0)
field_labels = args.get('field_labels', {})
for field in fields:
field = table[field]
if not field.readable:
continue
fieldname = field.name
chkval = request.vars.get('chk' + fieldname, None)
txtval = request.vars.get('txt' + fieldname, None)
opval = request.vars.get('op' + fieldname, None)
row = TR(TD(INPUT(_type="checkbox", _name="chk" + fieldname,
_disabled=(field.type == 'id'),
value=(field.type == 'id' or chkval == 'on'))),
TD(field_labels.get(fieldname, field.label)),
TD(SELECT([OPTION(query_labels.get(op, op),
_value=op) for op in ops],
_name="op" + fieldname,
value=opval)),
TD(INPUT(_type="text", _name="txt" + fieldname,
_value=txtval, _id='txt' + fieldname,
_class=str(field.type))))
tbl.append(row)
if request.post_vars and (chkval or field.type == 'id'):
if txtval and opval != '':
if field.type[0:10] == 'reference ':
refsearch.append(self.get_query(field, opval, txtval, refsearch=True))
elif validate:
value, error = field.validate(txtval)
if not error:
### TODO deal with 'starts with', 'ends with', 'contains' on GAE
query &= self.get_query(field, opval, value)
else:
row[3].append(DIV(error, _class='error'))
else:
query &= self.get_query(field, opval, txtval)
selected.append(field)
form = FORM(tbl, INPUT(_type="submit"))
if selected:
try:
results = db(query).select(*selected, **attributes)
for r in refsearch:
results = results.find(r)
except: # hmmm, we should do better here
results = None
return form, results
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
def fetch(url, data=None, headers=None,
cookie=Cookie.SimpleCookie(),
user_agent='Mozilla/5.0'):
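"""
Fetches the content of a URL via GET (or POST when data is given),
using google.appengine.api.urlfetch on GAE and urllib2 elsewhere.

Example (a sketch)::

    html = fetch('http://www.example.com/')
"""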
headers = headers or {}
if not data is None:
data = urllib.urlencode(data)
if user_agent:
headers['User-agent'] = user_agent
headers['Cookie'] = ' '.join(
['%s=%s;' % (c.key, c.value) for c in cookie.values()])
try:
from google.appengine.api import urlfetch
except ImportError:
req = urllib2.Request(url, data, headers)
html = urllib2.urlopen(req).read()
else:
method = ((data is None) and urlfetch.GET) or urlfetch.POST
while url is not None:
response = urlfetch.fetch(url=url, payload=data,
method=method, headers=headers,
allow_truncated=False, follow_redirects=False,
deadline=10)
# next request will be a get, so no need to send the data again
data = None
method = urlfetch.GET
# load cookies from the response
cookie.load(response.headers.get('set-cookie', ''))
url = response.headers.get('location')
html = response.content
return html
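# A minimal sketch of fetch() in use (the URL and form fields are
# illustrative). On GAE the passed jar is refreshed from each response;
# under urllib2 the opener installed above keeps cookies instead:
#
#     jar = Cookie.SimpleCookie()
#     fetch('http://www.example.com/login',
#           data={'user': 'me', 'password': 'secret'}, cookie=jar)
#     page = fetch('http://www.example.com/private', cookie=jar)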
regex_geocode = \
re.compile(r"""<geometry>[\W]*?<location>[\W]*?<lat>(?P<la>[^<]*)</lat>[\W]*?<lng>(?P<lo>[^<]*)</lng>[\W]*?</location>""")
def geocode(address):
try:
a = urllib.quote(address)
txt = fetch('http://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=%s'
% a)
item = regex_geocode.search(txt)
(la, lo) = (float(item.group('la')), float(item.group('lo')))
return (la, lo)
except:
return (0.0, 0.0)
def reverse_geocode(lat, lng, lang=None):
""" Try to get an approximate address for a given latitude, longitude. """
if not lang:
lang = current.T.accepted_language
try:
return json_parser.loads(fetch('http://maps.googleapis.com/maps/api/geocode/json?latlng=%(lat)s,%(lng)s&language=%(lang)s' % locals()))['results'][0]['formatted_address']
except:
return ''
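# A hedged example of the two geocoding helpers (the address and language are
# illustrative; both fall back to neutral values on any failure):
#
#     lat, lng = geocode('1600 Amphitheatre Parkway, Mountain View, CA')
#     address = reverse_geocode(lat, lng, lang='en')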
def universal_caller(f, *a, **b):
c = f.func_code.co_argcount
n = f.func_code.co_varnames[:c]
    defaults = f.func_defaults or []
    # guard the no-defaults case: n[0:-0] would be empty rather than all names
    if defaults:
        pos_args = n[0:-len(defaults)]
        named_args = n[-len(defaults):]
    else:
        pos_args = n
        named_args = ()
arg_dict = {}
# Fill the arg_dict with name and value for the submitted, positional values
for pos_index, pos_val in enumerate(a[:c]):
arg_dict[n[pos_index]] = pos_val # n[pos_index] is the name of the argument
    # There might be pos_args left that are sent as named values; gather them as well.
    # If an argument is already populated with a value, simply replace it.
for arg_name in pos_args[len(arg_dict):]:
if arg_name in b:
arg_dict[arg_name] = b[arg_name]
if len(arg_dict) >= len(pos_args):
        # All the positional arguments are found. The function may now be called.
        # However, we still need to merge in the values of the named arguments.
for arg_name in named_args:
if arg_name in b:
arg_dict[arg_name] = b[arg_name]
return f(**arg_dict)
# Raise an error, the function cannot be called.
raise HTTP(404, "Object does not exist")
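# A small sketch of how universal_caller resolves arguments (f is any plain
# function; HTTP(404) signals an unresolvable call):
#
#     def f(a, b, c=3):
#         return (a, b, c)
#     universal_caller(f, 1, b=2)   # -> (1, 2, 3)
#     universal_caller(f, 1)        # -> raises HTTP(404), b is missing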
class Service(object):
def __init__(self, environment=None):
self.run_procedures = {}
self.csv_procedures = {}
self.xml_procedures = {}
self.rss_procedures = {}
self.json_procedures = {}
self.jsonrpc_procedures = {}
self.jsonrpc2_procedures = {}
self.xmlrpc_procedures = {}
self.amfrpc_procedures = {}
self.amfrpc3_procedures = {}
self.soap_procedures = {}
def run(self, f):
"""
Example:
Use as::
service = Service()
@service.run
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/run/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def csv(self, f):
"""
Example:
Use as::
service = Service()
@service.csv
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/csv/myfunction?a=3&b=4
"""
        # note: csv services are registered under run_procedures because
        # serve_csv() dispatches from that registry
        self.run_procedures[f.__name__] = f
return f
def xml(self, f):
"""
Example:
Use as::
service = Service()
@service.xml
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/xml/myfunction?a=3&b=4
"""
        # note: xml services are registered under run_procedures because
        # serve_xml() dispatches from that registry
        self.run_procedures[f.__name__] = f
return f
def rss(self, f):
"""
Example:
Use as::
service = Service()
@service.rss
def myfunction():
return dict(title=..., link=..., description=...,
created_on=..., entries=[dict(title=..., link=...,
description=..., created_on=...])
def call():
return service()
        Then call it with::
wget http://..../app/default/call/rss/myfunction
"""
self.rss_procedures[f.__name__] = f
return f
def json(self, f):
"""
Example:
Use as::
service = Service()
@service.json
def myfunction(a, b):
return [{a: b}]
def call():
return service()
        Then call it with::
wget http://..../app/default/call/json/myfunction?a=hello&b=world
"""
self.json_procedures[f.__name__] = f
return f
def jsonrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.jsonrpc
def myfunction(a, b):
return a + b
def call():
return service()
        Then call it with::
wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world
"""
self.jsonrpc_procedures[f.__name__] = f
return f
def jsonrpc2(self, f):
"""
Example:
Use as::
service = Service()
@service.jsonrpc2
def myfunction(a, b):
return a + b
def call():
return service()
        Then call it with::
wget --post-data '{"jsonrpc": "2.0", "id": 1, "method": "myfunction", "params": {"a": 1, "b": 2}}' http://..../app/default/call/jsonrpc2
"""
self.jsonrpc2_procedures[f.__name__] = f
return f
def xmlrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.xmlrpc
def myfunction(a, b):
return a + b
def call():
return service()
        Then call it with::
wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world
"""
self.xmlrpc_procedures[f.__name__] = f
return f
def amfrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.amfrpc
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world
"""
self.amfrpc_procedures[f.__name__] = f
return f
def amfrpc3(self, domain='default'):
"""
Example:
Use as::
service = Service()
@service.amfrpc3('domain')
def myfunction(a, b):
return a + b
def call():
return service()
        Then call it with::
wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world
"""
if not isinstance(domain, str):
raise SyntaxError("AMF3 requires a domain for function")
def _amfrpc3(f):
if domain:
self.amfrpc3_procedures[domain + '.' + f.__name__] = f
else:
self.amfrpc3_procedures[f.__name__] = f
return f
return _amfrpc3
def soap(self, name=None, returns=None, args=None, doc=None):
"""
Example:
Use as::
service = Service()
@service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,})
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
from gluon.contrib.pysimplesoap.client import SoapClient
client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL")
response = client.MyFunction(a=1,b=2)
return response['result']
It also exposes online generated documentation and xml example messages
at `http://..../app/default/call/soap`
"""
def _soap(f):
self.soap_procedures[name or f.__name__] = f, returns, args, doc
return f
return _soap
def serve_run(self, args=None):
request = current.request
if not args:
args = request.args
if args and args[0] in self.run_procedures:
return str(universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars)))
self.error()
def serve_csv(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/x-csv'
if not args:
args = request.args
def none_exception(value):
if isinstance(value, unicode):
return value.encode('utf8')
if hasattr(value, 'isoformat'):
return value.isoformat()[:19].replace('T', ' ')
if value is None:
return '<NULL>'
return value
if args and args[0] in self.run_procedures:
import types
r = universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars))
s = cStringIO.StringIO()
if hasattr(r, 'export_to_csv_file'):
r.export_to_csv_file(s)
elif r and not isinstance(r, types.GeneratorType) and isinstance(r[0], (dict, Storage)):
import csv
writer = csv.writer(s)
writer.writerow(r[0].keys())
for line in r:
writer.writerow([none_exception(v)
for v in line.values()])
else:
import csv
writer = csv.writer(s)
for line in r:
writer.writerow(line)
return s.getvalue()
self.error()
def serve_xml(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/xml'
if not args:
args = request.args
if args and args[0] in self.run_procedures:
s = universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars))
if hasattr(s, 'as_list'):
s = s.as_list()
return serializers.xml(s, quote=False)
self.error()
def serve_rss(self, args=None):
request = current.request
response = current.response
if not args:
args = request.args
if args and args[0] in self.rss_procedures:
feed = universal_caller(self.rss_procedures[args[0]],
*args[1:], **dict(request.vars))
else:
self.error()
response.headers['Content-Type'] = 'application/rss+xml'
return serializers.rss(feed)
def serve_json(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
if not args:
args = request.args
d = dict(request.vars)
if args and args[0] in self.json_procedures:
s = universal_caller(self.json_procedures[args[0]], *args[1:], **d)
if hasattr(s, 'as_list'):
s = s.as_list()
return response.json(s)
self.error()
class JsonRpcException(Exception):
def __init__(self, code, info):
jrpc_error = Service.jsonrpc_errors.get(code)
if jrpc_error:
self.message, self.description = jrpc_error
self.code, self.info = code, info
# jsonrpc 2.0 error types. records the following structure {code: (message,meaning)}
jsonrpc_errors = {
-32700: ("Parse error. Invalid JSON was received by the server.", "An error occurred on the server while parsing the JSON text."),
-32600: ("Invalid Request", "The JSON sent is not a valid Request object."),
-32601: ("Method not found", "The method does not exist / is not available."),
-32602: ("Invalid params", "Invalid method parameter(s)."),
-32603: ("Internal error", "Internal JSON-RPC error."),
-32099: ("Server error", "Reserved for implementation-defined server-errors.")}
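    # For reference, an error produced by the return_error helpers below is
    # shaped roughly like this on the wire (values illustrative):
    #   {"jsonrpc": "2.0", "id": 1,
    #    "error": {"code": -32601, "message": "Method not found",
    #              "data": "Method \"foo\" does not exist"}}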
def serve_jsonrpc(self):
def return_response(id, result):
return serializers.json({'version': '1.1',
'id': id, 'result': result, 'error': None})
def return_error(id, code, message, data=None):
error = {'name': 'JSONRPCError',
'code': code, 'message': message}
if data is not None:
error['data'] = data
return serializers.json({'id': id,
'version': '1.1',
'error': error,
})
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
methods = self.jsonrpc_procedures
data = json_parser.loads(request.body.read())
jsonrpc_2 = data.get('jsonrpc')
if jsonrpc_2: #hand over to version 2 of the protocol
return self.serve_jsonrpc2(data)
id, method, params = data.get('id'), data.get('method'), data.get('params', [])
if id is None:
return return_error(0, 100, 'missing id')
if not method in methods:
return return_error(id, 100, 'method "%s" does not exist' % method)
try:
if isinstance(params, dict):
s = methods[method](**params)
else:
s = methods[method](*params)
if hasattr(s, 'as_list'):
s = s.as_list()
return return_response(id, s)
except Service.JsonRpcException, e:
return return_error(id, e.code, e.info)
except:
etype, eval, etb = sys.exc_info()
message = '%s: %s' % (etype.__name__, eval)
data = request.is_local and traceback.format_tb(etb)
logger.warning('jsonrpc exception %s\n%s' % (message, traceback.format_tb(etb)))
return return_error(id, 100, message, data)
def serve_jsonrpc2(self, data=None, batch_element=False):
def return_response(id, result):
if not must_respond:
return None
return serializers.json({'jsonrpc': '2.0',
'id': id, 'result': result})
def return_error(id, code, message=None, data=None):
error = {'code': code}
            if code in Service.jsonrpc_errors:
error['message'] = Service.jsonrpc_errors[code][0]
error['data'] = Service.jsonrpc_errors[code][1]
if message is not None:
error['message'] = message
if data is not None:
error['data'] = data
return serializers.json({'jsonrpc': '2.0',
'id': id,
'error': error})
def validate(data):
"""
Validate request as defined in: http://www.jsonrpc.org/specification#request_object.
Args:
data(str): The json object.
Returns:
- True -- if successful
- False -- if no error should be reported (i.e. data is missing 'id' member)
Raises:
JsonRPCException
"""
iparms = set(data.keys())
mandatory_args = set(['jsonrpc', 'method'])
missing_args = mandatory_args - iparms
if missing_args:
raise Service.JsonRpcException(-32600, 'Missing arguments %s.' % list(missing_args))
if data['jsonrpc'] != '2.0':
raise Service.JsonRpcException(-32603, 'Unsupported jsonrpc version "%s"' % data['jsonrpc'])
if 'id' not in iparms:
return False
return True
request = current.request
response = current.response
if not data:
response.headers['Content-Type'] = 'application/json; charset=utf-8'
try:
data = json_parser.loads(request.body.read())
except ValueError: # decoding error in json lib
return return_error(None, -32700)
# Batch handling
if isinstance(data, list) and not batch_element:
retlist = []
for c in data:
retstr = self.serve_jsonrpc2(c, batch_element=True)
if retstr: # do not add empty responses
retlist.append(retstr)
if len(retlist) == 0: # return nothing
return ''
else:
return "[" + ','.join(retlist) + "]"
methods = self.jsonrpc2_procedures
methods.update(self.jsonrpc_procedures)
try:
must_respond = validate(data)
except Service.JsonRpcException, e:
return return_error(None, e.code, e.info)
id, method, params = data.get('id'), data['method'], data.get('params', '')
if not method in methods:
return return_error(id, -32601, data='Method "%s" does not exist' % method)
try:
if isinstance(params, dict):
s = methods[method](**params)
else:
s = methods[method](*params)
if hasattr(s, 'as_list'):
s = s.as_list()
if must_respond:
return return_response(id, s)
else:
return ''
except HTTP, e:
raise e
except Service.JsonRpcException, e:
return return_error(id, e.code, e.info)
except:
etype, eval, etb = sys.exc_info()
data = '%s: %s\n' % (etype.__name__, eval) + str(request.is_local and traceback.format_tb(etb))
logger.warning('%s: %s\n%s' % (etype.__name__, eval, traceback.format_tb(etb)))
return return_error(id, -32099, data=data)
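    # A hedged sketch of a batch request serve_jsonrpc2 accepts ("myfunction"
    # is an assumed registered procedure). The second call carries no "id",
    # so it is a notification and produces no entry in the response list:
    #   [{"jsonrpc": "2.0", "id": 1, "method": "myfunction",
    #     "params": {"a": 1, "b": 2}},
    #    {"jsonrpc": "2.0", "method": "myfunction", "params": [3, 4]}]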
def serve_xmlrpc(self):
request = current.request
response = current.response
services = self.xmlrpc_procedures.values()
return response.xmlrpc(request, services)
def serve_amfrpc(self, version=0):
try:
import pyamf
import pyamf.remoting.gateway
except:
return "pyamf not installed or not in Python sys.path"
request = current.request
response = current.response
if version == 3:
services = self.amfrpc3_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
pyamf_request = pyamf.remoting.decode(request.body)
else:
services = self.amfrpc_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
context = pyamf.get_context(pyamf.AMF0)
pyamf_request = pyamf.remoting.decode(request.body, context)
pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion)
for name, message in pyamf_request:
pyamf_response[name] = base_gateway.getProcessor(message)(message)
response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE
if version == 3:
return pyamf.remoting.encode(pyamf_response).getvalue()
else:
return pyamf.remoting.encode(pyamf_response, context).getvalue()
def serve_soap(self, version="1.1"):
try:
from gluon.contrib.pysimplesoap.server import SoapDispatcher
except:
return "pysimplesoap not installed in contrib"
request = current.request
response = current.response
procedures = self.soap_procedures
location = "%s://%s%s" % (
request.env.wsgi_url_scheme,
request.env.http_host,
URL(r=request, f="call/soap", vars={}))
namespace = 'namespace' in response and response.namespace or location
documentation = response.description or ''
dispatcher = SoapDispatcher(
name=response.title,
location=location,
action=location, # SOAPAction
namespace=namespace,
prefix='pys',
documentation=documentation,
ns=True)
for method, (function, returns, args, doc) in procedures.iteritems():
dispatcher.register_function(method, function, returns, args, doc)
if request.env.request_method == 'POST':
fault = {}
# Process normal Soap Operation
response.headers['Content-Type'] = 'text/xml'
xml = dispatcher.dispatch(request.body.read(), fault=fault)
if fault:
# May want to consider populating a ticket here...
response.status = 500
# return the soap response
return xml
elif 'WSDL' in request.vars:
# Return Web Service Description
response.headers['Content-Type'] = 'text/xml'
return dispatcher.wsdl()
elif 'op' in request.vars:
# Return method help webpage
response.headers['Content-Type'] = 'text/html'
method = request.vars['op']
sample_req_xml, sample_res_xml, doc = dispatcher.help(method)
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
A("See all webservice operations",
_href=URL(r=request, f="call/soap", vars={})),
H2(method),
P(doc),
UL(LI("Location: %s" % dispatcher.location),
LI("Namespace: %s" % dispatcher.namespace),
LI("SoapAction: %s" % dispatcher.action),
),
H3("Sample SOAP XML Request Message:"),
CODE(sample_req_xml, language="xml"),
H3("Sample SOAP XML Response Message:"),
CODE(sample_res_xml, language="xml"),
]
return {'body': body}
else:
# Return general help and method list webpage
response.headers['Content-Type'] = 'text/html'
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
P(response.description),
P("The following operations are available"),
A("See WSDL for webservice description",
_href=URL(r=request, f="call/soap", vars={"WSDL":None})),
UL([LI(A("%s: %s" % (method, doc or ''),
_href=URL(r=request, f="call/soap", vars={'op': method})))
for method, doc in dispatcher.list_methods()]),
]
return {'body': body}
def __call__(self):
"""
Registers services with::
service = Service()
@service.run
@service.rss
@service.json
@service.jsonrpc
@service.xmlrpc
@service.amfrpc
@service.amfrpc3('domain')
@service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,})
Exposes services with::
def call():
return service()
You can call services with::
http://..../app/default/call/run?[parameters]
http://..../app/default/call/rss?[parameters]
http://..../app/default/call/json?[parameters]
http://..../app/default/call/jsonrpc
http://..../app/default/call/xmlrpc
http://..../app/default/call/amfrpc
http://..../app/default/call/amfrpc3
http://..../app/default/call/soap
"""
request = current.request
if len(request.args) < 1:
raise HTTP(404, "Not Found")
arg0 = request.args(0)
if arg0 == 'run':
return self.serve_run(request.args[1:])
elif arg0 == 'rss':
return self.serve_rss(request.args[1:])
elif arg0 == 'csv':
return self.serve_csv(request.args[1:])
elif arg0 == 'xml':
return self.serve_xml(request.args[1:])
elif arg0 == 'json':
return self.serve_json(request.args[1:])
elif arg0 == 'jsonrpc':
return self.serve_jsonrpc()
elif arg0 == 'jsonrpc2':
return self.serve_jsonrpc2()
elif arg0 == 'xmlrpc':
return self.serve_xmlrpc()
elif arg0 == 'amfrpc':
return self.serve_amfrpc()
elif arg0 == 'amfrpc3':
return self.serve_amfrpc(3)
elif arg0 == 'soap':
return self.serve_soap()
else:
self.error()
def error(self):
raise HTTP(404, "Object does not exist")
def completion(callback):
"""
Executes a task on completion of the called action.
Example:
Use as::
from gluon.tools import completion
@completion(lambda d: logging.info(repr(d)))
def index():
return dict(message='hello')
        It logs the output of the function every time index is called.
The argument of completion is executed in a new thread.
"""
def _completion(f):
def __completion(*a, **b):
d = None
try:
d = f(*a, **b)
return d
finally:
thread.start_new_thread(callback, (d,))
return __completion
return _completion
def prettydate(d, T=lambda x: x):
if isinstance(d, datetime.datetime):
dt = datetime.datetime.now() - d
elif isinstance(d, datetime.date):
dt = datetime.date.today() - d
elif not d:
return ''
else:
return '[invalid date]'
if dt.days < 0:
suffix = ' from now'
dt = -dt
else:
suffix = ' ago'
if dt.days >= 2 * 365:
return T('%d years' + suffix) % int(dt.days / 365)
elif dt.days >= 365:
return T('1 year' + suffix)
elif dt.days >= 60:
return T('%d months' + suffix) % int(dt.days / 30)
elif dt.days > 21:
return T('1 month' + suffix)
elif dt.days >= 14:
return T('%d weeks' + suffix) % int(dt.days / 7)
elif dt.days >= 7:
return T('1 week' + suffix)
elif dt.days > 1:
return T('%d days' + suffix) % dt.days
elif dt.days == 1:
return T('1 day' + suffix)
elif dt.seconds >= 2 * 60 * 60:
return T('%d hours' + suffix) % int(dt.seconds / 3600)
elif dt.seconds >= 60 * 60:
return T('1 hour' + suffix)
elif dt.seconds >= 2 * 60:
return T('%d minutes' + suffix) % int(dt.seconds / 60)
elif dt.seconds >= 60:
return T('1 minute' + suffix)
elif dt.seconds > 1:
return T('%d seconds' + suffix) % dt.seconds
elif dt.seconds == 1:
return T('1 second' + suffix)
else:
return T('now')
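# A minimal sketch of prettydate in use (T defaults to the identity, so no
# translator is required; the exact string depends on the current time):
#
#     prettydate(datetime.datetime.now() - datetime.timedelta(hours=3))
#     # -> '3 hours ago'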
def test_thread_separation():
def f():
c = PluginManager()
lock1.acquire()
lock2.acquire()
c.x = 7
lock1.release()
lock2.release()
lock1 = thread.allocate_lock()
lock2 = thread.allocate_lock()
lock1.acquire()
thread.start_new_thread(f, ())
a = PluginManager()
a.x = 5
lock1.release()
lock2.acquire()
return a.x
class PluginManager(object):
"""
Plugin Manager is similar to a storage object but it is a single level
singleton. This means that multiple instances within the same thread share
the same attributes.
Its constructor is also special. The first argument is the name of the
plugin you are defining.
The named arguments are parameters needed by the plugin with default values.
If the parameters were previous defined, the old values are used.
Example:
in some general configuration file::
plugins = PluginManager()
plugins.me.param1=3
within the plugin model::
_ = PluginManager('me',param1=5,param2=6,param3=7)
where the plugin is used::
>>> print plugins.me.param1
3
>>> print plugins.me.param2
6
>>> plugins.me.param3 = 8
>>> print plugins.me.param3
8
Here are some tests::
>>> a=PluginManager()
>>> a.x=6
>>> b=PluginManager('check')
>>> print b.x
6
>>> b=PluginManager() # reset settings
>>> print b.x
<Storage {}>
>>> b.x=7
>>> print a.x
7
>>> a.y.z=8
>>> print b.y.z
8
>>> test_thread_separation()
5
>>> plugins=PluginManager('me',db='mydb')
>>> print plugins.me.db
mydb
>>> print 'me' in plugins
True
>>> print plugins.me.installed
True
"""
instances = {}
def __new__(cls, *a, **b):
id = thread.get_ident()
lock = thread.allocate_lock()
try:
lock.acquire()
try:
return cls.instances[id]
except KeyError:
instance = object.__new__(cls, *a, **b)
cls.instances[id] = instance
return instance
finally:
lock.release()
def __init__(self, plugin=None, **defaults):
if not plugin:
self.__dict__.clear()
settings = self.__getattr__(plugin)
settings.installed = True
settings.update(
(k, v) for k, v in defaults.items() if not k in settings)
def __getattr__(self, key):
if not key in self.__dict__:
self.__dict__[key] = Storage()
return self.__dict__[key]
def keys(self):
return self.__dict__.keys()
def __contains__(self, key):
return key in self.__dict__
class Expose(object):
def __init__(self, base=None, basename=None, extensions=None, allow_download=True):
"""
Examples:
Use as::
def static():
return dict(files=Expose())
or::
def static():
path = os.path.join(request.folder,'static','public')
return dict(files=Expose(path,basename='public'))
Args:
extensions: an optional list of file extensions for filtering
displayed files: e.g. `['.py', '.jpg']`
allow_download: whether to allow downloading selected files
"""
current.session.forget()
base = base or os.path.join(current.request.folder, 'static')
basename = basename or current.request.function
self.basename = basename
if current.request.raw_args:
self.args = [arg for arg in current.request.raw_args.split('/') if arg]
else:
self.args = [arg for arg in current.request.args if arg]
filename = os.path.join(base, *self.args)
if not os.path.exists(filename):
raise HTTP(404, "FILE NOT FOUND")
        normfile = os.path.normpath(filename)
        normbase = os.path.normpath(base)
        # allow base itself or anything strictly inside it, nothing else
        if normfile != normbase and not normfile.startswith(normbase + os.sep):
            raise HTTP(401, "NOT AUTHORIZED")
if allow_download and not os.path.isdir(filename):
current.response.headers['Content-Type'] = contenttype(filename)
raise HTTP(200, open(filename, 'rb'), **current.response.headers)
self.path = path = os.path.join(filename, '*')
self.folders = [f[len(path) - 1:] for f in sorted(glob.glob(path))
if os.path.isdir(f) and not self.isprivate(f)]
self.filenames = [f[len(path) - 1:] for f in sorted(glob.glob(path))
if not os.path.isdir(f) and not self.isprivate(f)]
if 'README' in self.filenames:
            readme = read_file(os.path.join(filename, 'README'))
self.paragraph = MARKMIN(readme)
else:
self.paragraph = None
if extensions:
self.filenames = [f for f in self.filenames
if os.path.splitext(f)[-1] in extensions]
def breadcrumbs(self, basename):
path = []
span = SPAN()
span.append(A(basename, _href=URL()))
for arg in self.args:
span.append('/')
path.append(arg)
span.append(A(arg, _href=URL(args='/'.join(path))))
return span
def table_folders(self):
if self.folders:
return SPAN(H3('Folders'), TABLE(
*[TR(TD(A(folder, _href=URL(args=self.args + [folder]))))
for folder in self.folders],
**dict(_class="table")))
return ''
@staticmethod
def isprivate(f):
return 'private' in f or f.startswith('.') or f.endswith('~')
@staticmethod
def isimage(f):
return os.path.splitext(f)[-1].lower() in (
'.png', '.jpg', '.jpeg', '.gif', '.tiff')
def table_files(self, width=160):
if self.filenames:
return SPAN(H3('Files'),
TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))),
TD(IMG(_src=URL(args=self.args + [f]),
_style='max-width:%spx' % width)
if width and self.isimage(f) else ''))
for f in self.filenames],
**dict(_class="table")))
return ''
def xml(self):
return DIV(
H2(self.breadcrumbs(self.basename)),
self.paragraph or '',
self.table_folders(),
self.table_files()).xml()
class Wiki(object):
everybody = 'everybody'
rows_page = 25
def markmin_base(self, body):
return MARKMIN(body, extra=self.settings.extra,
url=True, environment=self.env,
autolinks=lambda link: expand_one(link, {})).xml()
def render_tags(self, tags):
return DIV(
_class='w2p_wiki_tags',
*[A(t.strip(), _href=URL(args='_search', vars=dict(q=t)))
for t in tags or [] if t.strip()])
def markmin_render(self, page):
return self.markmin_base(page.body) + self.render_tags(page.tags).xml()
def html_render(self, page):
html = page.body
# @///function -> http://..../function
html = replace_at_urls(html, URL)
# http://...jpg -> <img src="http://...jpg/> or embed
html = replace_autolinks(html, lambda link: expand_one(link, {}))
# @{component:name} -> <script>embed component name</script>
html = replace_components(html, self.env)
html = html + self.render_tags(page.tags).xml()
return html
@staticmethod
def component(text):
"""
In wiki docs allows `@{component:controller/function/args}`
which renders as a `LOAD(..., ajax=True)`
"""
items = text.split('/')
controller, function, args = items[0], items[1], items[2:]
return LOAD(controller, function, args=args, ajax=True).xml()
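    # e.g. a wiki page body containing (controller/function illustrative):
    #   @{component:default/data/1}
    # is rendered as an ajax LOAD of default/data with args ['1'].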
def get_renderer(self):
if isinstance(self.settings.render, basestring):
r = getattr(self, "%s_render" % self.settings.render)
elif callable(self.settings.render):
r = self.settings.render
elif isinstance(self.settings.render, dict):
def custom_render(page):
if page.render:
if page.render in self.settings.render.keys():
my_render = self.settings.render[page.render]
else:
my_render = getattr(self, "%s_render" % page.render)
else:
my_render = self.markmin_render
return my_render(page)
r = custom_render
else:
raise ValueError(
"Invalid render type %s" % type(self.settings.render))
return r
def __init__(self, auth, env=None, render='markmin',
manage_permissions=False, force_prefix='',
restrict_search=False, extra=None,
menu_groups=None, templates=None, migrate=True,
controller=None, function=None, groups=None):
settings = self.settings = auth.settings.wiki
"""
Args:
render:
- "markmin"
- "html"
- `<function>` : Sets a custom render function
- `dict(html=<function>, markmin=...)`: dict(...) allows
multiple custom render functions
- "multiple" : Is the same as `{}`. It enables per-record
formats using builtins
"""
engines = set(['markmin', 'html'])
show_engine = False
if render == "multiple":
render = {}
if isinstance(render, dict):
[engines.add(key) for key in render]
show_engine = True
settings.render = render
perms = settings.manage_permissions = manage_permissions
settings.force_prefix = force_prefix
settings.restrict_search = restrict_search
settings.extra = extra or {}
settings.menu_groups = menu_groups
settings.templates = templates
settings.controller = controller
settings.function = function
settings.groups = auth.user_groups.values() \
if groups is None else groups
db = auth.db
self.env = env or {}
self.env['component'] = Wiki.component
self.auth = auth
self.wiki_menu_items = None
if self.auth.user:
self.settings.force_prefix = force_prefix % self.auth.user
else:
self.settings.force_prefix = force_prefix
self.host = current.request.env.http_host
table_definitions = [
('wiki_page', {
'args': [
Field('slug',
requires=[IS_SLUG(),
IS_NOT_IN_DB(db, 'wiki_page.slug')],
writable=False),
Field('title', length=255, unique=True),
Field('body', 'text', notnull=True),
Field('tags', 'list:string'),
Field('can_read', 'list:string',
writable=perms,
readable=perms,
default=[Wiki.everybody]),
Field('can_edit', 'list:string',
writable=perms, readable=perms,
default=[Wiki.everybody]),
Field('changelog'),
Field('html', 'text',
compute=self.get_renderer(),
readable=False, writable=False),
Field('render', default="markmin",
readable=show_engine,
writable=show_engine,
requires=IS_EMPTY_OR(
IS_IN_SET(engines))),
auth.signature],
'vars': {'format': '%(title)s', 'migrate': migrate}}),
('wiki_tag', {
'args': [
Field('name'),
Field('wiki_page', 'reference wiki_page'),
auth.signature],
'vars':{'format': '%(title)s', 'migrate': migrate}}),
('wiki_media', {
'args': [
Field('wiki_page', 'reference wiki_page'),
Field('title', required=True),
Field('filename', 'upload', required=True),
auth.signature],
'vars': {'format': '%(title)s', 'migrate': migrate}}),
]
# define only non-existent tables
for key, value in table_definitions:
args = []
if not key in db.tables():
# look for wiki_ extra fields in auth.settings
extra_fields = auth.settings.extra_fields
if extra_fields:
if key in extra_fields:
if extra_fields[key]:
for field in extra_fields[key]:
args.append(field)
args += value['args']
db.define_table(key, *args, **value['vars'])
if self.settings.templates is None and not \
self.settings.manage_permissions:
self.settings.templates = db.wiki_page.tags.contains('template') & \
db.wiki_page.can_read.contains('everybody')
def update_tags_insert(page, id, db=db):
for tag in page.tags or []:
tag = tag.strip().lower()
if tag:
db.wiki_tag.insert(name=tag, wiki_page=id)
def update_tags_update(dbset, page, db=db):
page = dbset.select(limitby=(0, 1)).first()
db(db.wiki_tag.wiki_page == page.id).delete()
for tag in page.tags or []:
tag = tag.strip().lower()
if tag:
db.wiki_tag.insert(name=tag, wiki_page=page.id)
db.wiki_page._after_insert.append(update_tags_insert)
db.wiki_page._after_update.append(update_tags_update)
if (auth.user and
check_credentials(current.request, gae_login=False) and
not 'wiki_editor' in auth.user_groups.values() and
self.settings.groups == auth.user_groups.values()):
group = db.auth_group(role='wiki_editor')
gid = group.id if group else db.auth_group.insert(
role='wiki_editor')
auth.add_membership(gid)
settings.lock_keys = True
# WIKI ACCESS POLICY
def not_authorized(self, page=None):
raise HTTP(401)
def can_read(self, page):
if 'everybody' in page.can_read or not \
self.settings.manage_permissions:
return True
elif self.auth.user:
groups = self.settings.groups
if ('wiki_editor' in groups or
set(groups).intersection(set(page.can_read + page.can_edit)) or
page.created_by == self.auth.user.id):
return True
return False
def can_edit(self, page=None):
if not self.auth.user:
redirect(self.auth.settings.login_url)
groups = self.settings.groups
return ('wiki_editor' in groups or
(page is None and 'wiki_author' in groups) or
not page is None and (
set(groups).intersection(set(page.can_edit)) or
page.created_by == self.auth.user.id))
def can_manage(self):
if not self.auth.user:
return False
groups = self.settings.groups
return 'wiki_editor' in groups
def can_search(self):
return True
def can_see_menu(self):
if self.auth.user:
if self.settings.menu_groups is None:
return True
else:
groups = self.settings.groups
if any(t in self.settings.menu_groups for t in groups):
return True
return False
### END POLICY
def automenu(self):
"""adds the menu if not present"""
if (not self.wiki_menu_items and
self.settings.controller and
self.settings.function):
self.wiki_menu_items = self.menu(self.settings.controller,
self.settings.function)
current.response.menu += self.wiki_menu_items
def __call__(self):
request = current.request
settings = self.settings
settings.controller = settings.controller or request.controller
settings.function = settings.function or request.function
self.automenu()
zero = request.args(0) or 'index'
if zero and zero.isdigit():
return self.media(int(zero))
elif not zero or not zero.startswith('_'):
return self.read(zero)
elif zero == '_edit':
return self.edit(request.args(1) or 'index', request.args(2) or 0)
elif zero == '_editmedia':
return self.editmedia(request.args(1) or 'index')
elif zero == '_create':
return self.create()
elif zero == '_pages':
return self.pages()
elif zero == '_search':
return self.search()
elif zero == '_recent':
ipage = int(request.vars.page or 0)
query = self.auth.db.wiki_page.created_by == request.args(
1, cast=int)
return self.search(query=query,
orderby=~self.auth.db.wiki_page.created_on,
limitby=(ipage * self.rows_page,
(ipage + 1) * self.rows_page),
)
elif zero == '_cloud':
return self.cloud()
elif zero == '_preview':
return self.preview(self.get_renderer())
def first_paragraph(self, page):
if not self.can_read(page):
mm = (page.body or '').replace('\r', '')
ps = [p for p in mm.split('\n\n')
if not p.startswith('#') and p.strip()]
if ps:
return ps[0]
return ''
def fix_hostname(self, body):
return (body or '').replace('://HOSTNAME', '://%s' % self.host)
def read(self, slug, force_render=False):
        if slug == '_cloud':
            return self.cloud()
        elif slug == '_search':
            return self.search()
page = self.auth.db.wiki_page(slug=slug)
if page and (not self.can_read(page)):
return self.not_authorized(page)
if current.request.extension == 'html':
if not page:
url = URL(args=('_create', slug))
return dict(content=A('Create page "%s"' % slug, _href=url, _class="btn"))
else:
html = page.html if not force_render else self.get_renderer()(page)
content = XML(self.fix_hostname(html))
return dict(title=page.title,
slug=page.slug,
page=page,
content=content,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
elif current.request.extension == 'load':
return self.fix_hostname(page.html) if page else ''
else:
if not page:
raise HTTP(404)
else:
return dict(title=page.title,
slug=page.slug,
page=page,
content=page.body,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
def edit(self, slug, from_template=0):
auth = self.auth
db = auth.db
page = db.wiki_page(slug=slug)
if not self.can_edit(page):
return self.not_authorized(page)
title_guess = ' '.join(c.capitalize() for c in slug.split('-'))
if not page:
if not (self.can_manage() or
slug.startswith(self.settings.force_prefix)):
current.session.flash = 'slug must have "%s" prefix' \
% self.settings.force_prefix
redirect(URL(args=('_create')))
db.wiki_page.can_read.default = [Wiki.everybody]
db.wiki_page.can_edit.default = [auth.user_group_role()]
db.wiki_page.title.default = title_guess
db.wiki_page.slug.default = slug
if slug == 'wiki-menu':
db.wiki_page.body.default = \
'- Menu Item > @////index\n- - Submenu > http://web2py.com'
else:
db.wiki_page.body.default = db(db.wiki_page.id == from_template).select(db.wiki_page.body)[0].body \
if int(from_template) > 0 else '## %s\n\npage content' % title_guess
vars = current.request.post_vars
if vars.body:
vars.body = vars.body.replace('://%s' % self.host, '://HOSTNAME')
form = SQLFORM(db.wiki_page, page, deletable=True,
formstyle='table2cols', showid=False).process()
if form.deleted:
current.session.flash = 'page deleted'
redirect(URL())
elif form.accepted:
current.session.flash = 'page created'
redirect(URL(args=slug))
script = """
jQuery(function() {
if (!jQuery('#wiki_page_body').length) return;
var pagecontent = jQuery('#wiki_page_body');
pagecontent.css('font-family',
'Monaco,Menlo,Consolas,"Courier New",monospace');
var prevbutton = jQuery('<button class="btn nopreview">Preview</button>');
var preview = jQuery('<div id="preview"></div>').hide();
var previewmedia = jQuery('<div id="previewmedia"></div>');
var form = pagecontent.closest('form');
preview.insertBefore(form);
prevbutton.insertBefore(form);
if(%(link_media)s) {
var mediabutton = jQuery('<button class="btn nopreview">Media</button>');
mediabutton.insertBefore(form);
previewmedia.insertBefore(form);
mediabutton.click(function() {
if (mediabutton.hasClass('nopreview')) {
web2py_component('%(urlmedia)s', 'previewmedia');
} else {
previewmedia.empty();
}
mediabutton.toggleClass('nopreview');
});
}
prevbutton.click(function(e) {
e.preventDefault();
if (prevbutton.hasClass('nopreview')) {
prevbutton.addClass('preview').removeClass(
'nopreview').html('Edit Source');
try{var wiki_render = jQuery('#wiki_page_render').val()}
catch(e){var wiki_render = null;}
web2py_ajax_page('post', \
'%(url)s', {body: jQuery('#wiki_page_body').val(), \
render: wiki_render}, 'preview');
form.fadeOut('fast', function() {preview.fadeIn()});
} else {
prevbutton.addClass(
'nopreview').removeClass('preview').html('Preview');
preview.fadeOut('fast', function() {form.fadeIn()});
}
})
})
""" % dict(url=URL(args=('_preview', slug)), link_media=('true' if page else 'false'),
urlmedia=URL(extension='load',
args=('_editmedia', slug),
vars=dict(embedded=1)))
return dict(content=TAG[''](form, SCRIPT(script)))
def editmedia(self, slug):
auth = self.auth
db = auth.db
page = db.wiki_page(slug=slug)
if not (page and self.can_edit(page)):
return self.not_authorized(page)
self.auth.db.wiki_media.id.represent = lambda id, row: \
id if not row.filename else \
SPAN('@////%i/%s.%s' % (id, IS_SLUG.urlify(row.title.split('.')[0]), row.filename.split('.')[-1]))
self.auth.db.wiki_media.wiki_page.default = page.id
self.auth.db.wiki_media.wiki_page.writable = False
links = []
csv = True
create = True
if current.request.vars.embedded:
script = "var c = jQuery('#wiki_page_body'); c.val(c.val() + jQuery('%s').text()); return false;"
fragment = self.auth.db.wiki_media.id.represent
csv = False
create = False
            links = [
lambda row:
A('copy into source', _href='#', _onclick=script % (fragment(row.id, row)))
]
content = SQLFORM.grid(
self.auth.db.wiki_media.wiki_page == page.id,
orderby=self.auth.db.wiki_media.title,
links=links,
csv=csv,
create=create,
args=['_editmedia', slug],
user_signature=False)
return dict(content=content)
def create(self):
if not self.can_edit():
return self.not_authorized()
db = self.auth.db
slugs = db(db.wiki_page.id > 0).select(db.wiki_page.id, db.wiki_page.slug)
options = [OPTION(row.slug, _value=row.id) for row in slugs]
options.insert(0, OPTION('', _value=''))
fields = [Field("slug", default=current.request.args(1) or
self.settings.force_prefix,
requires=(IS_SLUG(), IS_NOT_IN_DB(db, db.wiki_page.slug))),]
if self.settings.templates:
fields.append(
Field("from_template", "reference wiki_page",
requires=IS_EMPTY_OR(
IS_IN_DB(db(self.settings.templates),
db.wiki_page._id,
'%(slug)s')),
comment=current.T(
"Choose Template or empty for new Page")))
form = SQLFORM.factory(*fields, **dict(_class="well"))
form.element("[type=submit]").attributes["_value"] = \
current.T("Create Page from Slug")
if form.process().accepted:
form.vars.from_template = 0 if not form.vars.from_template \
else form.vars.from_template
redirect(URL(args=('_edit', form.vars.slug, form.vars.from_template or 0))) # added param
return dict(content=form)
def pages(self):
if not self.can_manage():
return self.not_authorized()
self.auth.db.wiki_page.slug.represent = lambda slug, row: SPAN(
'@////%s' % slug)
self.auth.db.wiki_page.title.represent = lambda title, row: \
A(title, _href=URL(args=row.slug))
wiki_table = self.auth.db.wiki_page
content = SQLFORM.grid(
wiki_table,
fields=[wiki_table.slug,
wiki_table.title, wiki_table.tags,
wiki_table.can_read, wiki_table.can_edit],
links=[
lambda row:
A('edit', _href=URL(args=('_edit', row.slug)), _class='btn'),
lambda row:
A('media', _href=URL(args=('_editmedia', row.slug)), _class='btn')],
details=False, editable=False, deletable=False, create=False,
orderby=self.auth.db.wiki_page.title,
args=['_pages'],
user_signature=False)
return dict(content=content)
def media(self, id):
request, response, db = current.request, current.response, self.auth.db
media = db.wiki_media(id)
if media:
if self.settings.manage_permissions:
page = db.wiki_page(media.wiki_page)
if not self.can_read(page):
return self.not_authorized(page)
request.args = [media.filename]
m = response.download(request, db)
current.session.forget() # get rid of the cookie
response.headers['Last-Modified'] = \
request.utcnow.strftime("%a, %d %b %Y %H:%M:%S GMT")
if 'Content-Disposition' in response.headers:
del response.headers['Content-Disposition']
response.headers['Pragma'] = 'cache'
response.headers['Cache-Control'] = 'private'
return m
else:
raise HTTP(404)
def menu(self, controller='default', function='index'):
db = self.auth.db
request = current.request
menu_page = db.wiki_page(slug='wiki-menu')
menu = []
if menu_page:
tree = {'': menu}
regex = re.compile('[\r\n\t]*(?P<base>(\s*\-\s*)+)(?P<title>\w.*?)\s+\>\s+(?P<link>\S+)')
for match in regex.finditer(self.fix_hostname(menu_page.body)):
base = match.group('base').replace(' ', '')
title = match.group('title')
link = match.group('link')
title_page = None
if link.startswith('@'):
items = link[2:].split('/')
if len(items) > 3:
title_page = items[3]
link = URL(a=items[0] or None, c=items[1] or controller,
f=items[2] or function, args=items[3:])
parent = tree.get(base[1:], tree[''])
subtree = []
tree[base] = subtree
parent.append((current.T(title),
request.args(0) == title_page,
link, subtree))
if self.can_see_menu():
submenu = []
menu.append((current.T('[Wiki]'), None, None, submenu))
if URL() == URL(controller, function):
if not str(request.args(0)).startswith('_'):
slug = request.args(0) or 'index'
mode = 1
elif request.args(0) == '_edit':
slug = request.args(1) or 'index'
mode = 2
elif request.args(0) == '_editmedia':
slug = request.args(1) or 'index'
mode = 3
else:
mode = 0
if mode in (2, 3):
submenu.append((current.T('View Page'), None,
URL(controller, function, args=slug)))
if mode in (1, 3):
submenu.append((current.T('Edit Page'), None,
URL(controller, function, args=('_edit', slug))))
if mode in (1, 2):
submenu.append((current.T('Edit Page Media'), None,
URL(controller, function, args=('_editmedia', slug))))
submenu.append((current.T('Create New Page'), None,
URL(controller, function, args=('_create'))))
# Moved next if to inside self.auth.user check
if self.can_manage():
submenu.append((current.T('Manage Pages'), None,
URL(controller, function, args=('_pages'))))
submenu.append((current.T('Edit Menu'), None,
URL(controller, function, args=('_edit', 'wiki-menu'))))
# Also moved inside self.auth.user check
submenu.append((current.T('Search Pages'), None,
URL(controller, function, args=('_search'))))
return menu
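    # The 'wiki-menu' page uses one line per entry, with leading dashes for
    # depth and '>' separating title from link, e.g. (links illustrative):
    #   - Home > @////index
    #   - - About > http://web2py.com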
def search(self, tags=None, query=None, cloud=True, preview=True,
limitby=(0, 100), orderby=None):
if not self.can_search():
return self.not_authorized()
request = current.request
content = CAT()
if tags is None and query is None:
form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
value=request.vars.q),
INPUT(_type="submit", _value=current.T('Search')),
_method='GET')
content.append(DIV(form, _class='w2p_wiki_form'))
if request.vars.q:
tags = [v.strip() for v in request.vars.q.split(',')]
tags = [v.lower() for v in tags if v]
if tags or not query is None:
db = self.auth.db
count = db.wiki_tag.wiki_page.count()
fields = [db.wiki_page.id, db.wiki_page.slug,
db.wiki_page.title, db.wiki_page.tags,
db.wiki_page.can_read]
if preview:
fields.append(db.wiki_page.body)
if query is None:
query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
(db.wiki_tag.name.belongs(tags))
query = query | db.wiki_page.title.contains(request.vars.q)
            if self.settings.restrict_search and not self.can_manage():
query = query & (db.wiki_page.created_by == self.auth.user_id)
pages = db(query).select(count,
*fields, **dict(orderby=orderby or ~count,
groupby=reduce(lambda a, b: a | b, fields),
distinct=True,
limitby=limitby))
if request.extension in ('html', 'load'):
if not pages:
content.append(DIV(current.T("No results"),
_class='w2p_wiki_form'))
def link(t):
return A(t, _href=URL(args='_search', vars=dict(q=t)))
items = [DIV(H3(A(p.wiki_page.title, _href=URL(
args=p.wiki_page.slug))),
MARKMIN(self.first_paragraph(p.wiki_page))
if preview else '',
DIV(_class='w2p_wiki_tags',
*[link(t.strip()) for t in
p.wiki_page.tags or [] if t.strip()]),
_class='w2p_wiki_search_item')
for p in pages]
content.append(DIV(_class='w2p_wiki_pages', *items))
else:
cloud = False
content = [p.wiki_page.as_dict() for p in pages]
elif cloud:
content.append(self.cloud()['content'])
if request.extension == 'load':
return content
return dict(content=content)
def cloud(self):
db = self.auth.db
count = db.wiki_tag.wiki_page.count(distinct=True)
ids = db(db.wiki_tag).select(
db.wiki_tag.name, count,
distinct=True,
groupby=db.wiki_tag.name,
orderby=~count, limitby=(0, 20))
if ids:
a, b = ids[0](count), ids[-1](count)
def style(c):
STYLE = 'padding:0 0.2em;line-height:%.2fem;font-size:%.2fem'
size = (1.5 * (c - b) / max(a - b, 1) + 1.3)
return STYLE % (1.3, size)
items = []
for item in ids:
items.append(A(item.wiki_tag.name,
_style=style(item(count)),
_href=URL(args='_search',
vars=dict(q=item.wiki_tag.name))))
items.append(' ')
return dict(content=DIV(_class='w2p_cloud', *items))
def preview(self, render):
request = current.request
# FIXME: This is an ugly hack to ensure a default render
# engine if not specified (with multiple render engines)
if not "render" in request.post_vars:
request.post_vars.render = None
return render(request.post_vars)
class Config(object):
def __init__(
self,
filename,
section,
        default_values=None
    ):
        # avoid a shared mutable default argument
        self.config = ConfigParser.ConfigParser(default_values or {})
self.config.read(filename)
if not self.config.has_section(section):
self.config.add_section(section)
self.section = section
self.filename = filename
def read(self):
        if not isinstance(current.session['settings_%s' % self.section], dict):
settings = dict(self.config.items(self.section))
else:
settings = current.session['settings_%s' % self.section]
return settings
def save(self, options):
for option, value in options:
self.config.set(self.section, option, value)
        try:
            # use a context manager so the file is flushed and closed
            with open(self.filename, 'w') as configfile:
                self.config.write(configfile)
            result = True
        except:
            current.session['settings_%s' % self.section] = dict(self.config.items(self.section))
            result = False
return result
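# A minimal usage sketch for Config (the filename, section and option names
# are illustrative assumptions):
#
#     config = Config('private/appconfig.ini', 'smtp',
#                     default_values={'server': 'localhost:25'})
#     settings = config.read()             # dict of options in [smtp]
#     config.save(settings.items())        # True on success, else cached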
if __name__ == '__main__':
import doctest
doctest.testmod()
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Auth, Mail, PluginManager and various utilities
------------------------------------------------
"""
import base64
try:
import cPickle as pickle
except:
import pickle
import datetime
import thread
import logging
import sys
import glob
import os
import re
import time
import traceback
import smtplib
import urllib
import urllib2
import Cookie
import cStringIO
import ConfigParser
import email.utils
import random
from email import MIMEBase, MIMEMultipart, MIMEText, Encoders, Header, message_from_string, Charset
from gluon.contenttype import contenttype
from gluon.storage import Storage, StorageList, Settings, Messages
from gluon.utils import web2py_uuid
from gluon.fileutils import read_file, check_credentials
from gluon import *
from gluon.contrib.autolinks import expand_one
from gluon.contrib.markmin.markmin2html import \
replace_at_urls, replace_autolinks, replace_components
from pydal.objects import Row, Set, Query
import gluon.serializers as serializers
Table = DAL.Table
Field = DAL.Field
try:
import json as json_parser
except ImportError:
try:
import simplejson as json_parser
except:
import gluon.contrib.simplejson as json_parser
__all__ = ['Mail', 'Auth', 'Recaptcha', 'Crud', 'Service', 'Wiki',
'PluginManager', 'fetch', 'geocode', 'reverse_geocode', 'prettydate']
f position < 0 and len(args) >= -position:
return args[position]
elif position >= 0 and len(args) > position:
return args[position]
else:
return default
def callback(actions, form, tablename=None):
if actions:
if tablename and isinstance(actions, dict):
actions = actions.get(tablename, [])
if not isinstance(actions, (list, tuple)):
actions = [actions]
[action(form) for action in actions]
def validators(*a):
b = []
for item in a:
if isinstance(item, (list, tuple)):
b = b + list(item)
else:
b.append(item)
return b
def call_or_redirect(f, *args):
if callable(f):
redirect(f(*args))
else:
redirect(f)
def replace_id(url, form):
if url:
url = url.replace('[id]', str(form.vars.id))
if url[0] == '/' or url[:4] == 'http':
return url
return URL(url)
class Mail(object):
"""
Class for configuring and sending emails with alternative text / html
body, multiple attachments and encryption support
Works with SMTP and Google App Engine.
Args:
server: SMTP server address in address:port notation
sender: sender email address
login: sender login name and password in login:password notation
or None if no authentication is required
tls: enables/disables encryption (True by default)
In Google App Engine use ::
server='gae'
For sake of backward compatibility all fields are optional and default
to None, however, to be able to send emails at least server and sender
must be specified. They are available under following fields::
mail.settings.server
mail.settings.sender
mail.settings.login
mail.settings.timeout = 60 # seconds (default)
When server is 'logging', email is logged but not sent (debug mode)
Optionally you can use PGP encryption or X509::
mail.settings.cipher_type = None
mail.settings.gpg_home = None
mail.settings.sign = True
mail.settings.sign_passphrase = None
mail.settings.encrypt = True
mail.settings.x509_sign_keyfile = None
mail.settings.x509_sign_certfile = None
mail.settings.x509_sign_chainfile = None
mail.settings.x509_nocerts = False
mail.settings.x509_crypt_certfiles = None
cipher_type : None
gpg - need a python-pyme package and gpgme lib
x509 - smime
gpg_home : you can set a GNUPGHOME environment variable
to specify home of gnupg
sign : sign the message (True or False)
sign_passphrase : passphrase for key signing
encrypt : encrypt the message (True or False). It defaults
to True
... x509 only ...
x509_sign_keyfile : the signers private key filename or
string containing the key. (PEM format)
x509_sign_certfile: the signers certificate filename or
string containing the cert. (PEM format)
x509_sign_chainfile: sets the optional all-in-one file where you
can assemble the certificates of Certification
Authorities (CA) which form the certificate
chain of email certificate. It can be a
string containing the certs to. (PEM format)
x509_nocerts : if True then no attached certificate in mail
x509_crypt_certfiles: the certificates file or strings to encrypt
the messages with can be a file name /
string or a list of file names /
strings (PEM format)
Examples:
Create Mail object with authentication data for remote server::
mail = Mail('example.com:25', 'me@example.com', 'me:password')
Notice for GAE users:
attachments have an automatic content_id='attachment-i' where i is progressive number
in this way the can be referenced from the HTML as <img src="cid:attachment-0" /> etc.
"""
class Attachment(MIMEBase.MIMEBase):
"""
Email attachment
Args:
payload: path to file or file-like object with read() method
filename: name of the attachment stored in message; if set to
None, it will be fetched from payload path; file-like
object payload must have explicit filename specified
content_id: id of the attachment; automatically contained within
`<` and `>`
content_type: content type of the attachment; if set to None,
it will be fetched from filename using gluon.contenttype
module
encoding: encoding of all strings passed to this function (except
attachment body)
Content ID is used to identify attachments within the html body;
in example, attached image with content ID 'photo' may be used in
html message as a source of img tag `<img src="cid:photo" />`.
Example::
Create attachment from text file::
attachment = Mail.Attachment('/path/to/file.txt')
Content-Type: text/plain
MIME-Version: 1.0
Content-Disposition: attachment; filename="file.txt"
Content-Transfer-Encoding: base64
SOMEBASE64CONTENT=
Create attachment from image file with custom filename and cid::
attachment = Mail.Attachment('/path/to/file.png',
filename='photo.png',
content_id='photo')
Content-Type: image/png
MIME-Version: 1.0
Content-Disposition: attachment; filename="photo.png"
Content-Id: <photo>
Content-Transfer-Encoding: base64
SOMEOTHERBASE64CONTENT=
"""
def __init__(
self,
payload,
filename=None,
content_id=None,
content_type=None,
encoding='utf-8'):
if isinstance(payload, str):
if filename is None:
filename = os.path.basename(payload)
payload = read_file(payload, 'rb')
else:
if filename is None:
raise Exception('Missing attachment name')
payload = payload.read()
filename = filename.encode(encoding)
if content_type is None:
content_type = contenttype(filename)
self.my_filename = filename
self.my_payload = payload
MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1))
self.set_payload(payload)
self['Content-Disposition'] = 'attachment; filename="%s"' % filename
if not content_id is None:
self['Content-Id'] = '<%s>' % content_id.encode(encoding)
Encoders.encode_base64(self)
def __init__(self, server=None, sender=None, login=None, tls=True):
settings = self.settings = Settings()
settings.server = server
settings.sender = sender
settings.login = login
settings.tls = tls
settings.timeout = 60
settings.hostname = None
settings.ssl = False
settings.cipher_type = None
settings.gpg_home = None
settings.sign = True
settings.sign_passphrase = None
settings.encrypt = True
settings.x509_sign_keyfile = None
settings.x509_sign_certfile = None
settings.x509_sign_chainfile = None
settings.x509_nocerts = False
settings.x509_crypt_certfiles = None
settings.debug = False
settings.lock_keys = True
self.result = {}
self.error = None
def send(self,
to,
subject='[no subject]',
message='[no message]',
attachments=None,
cc=None,
bcc=None,
reply_to=None,
sender=None,
encoding='utf-8',
raw=False,
headers={},
from_address=None,
cipher_type=None,
sign=None,
sign_passphrase=None,
encrypt=None,
x509_sign_keyfile=None,
x509_sign_chainfile=None,
x509_sign_certfile=None,
x509_crypt_certfiles=None,
x509_nocerts=None
):
"""
Sends an email using data specified in constructor
Args:
to: list or tuple of receiver addresses; will also accept single
object
subject: subject of the email
message: email body text; depends on type of passed object:
- if 2-list or 2-tuple is passed: first element will be
source of plain text while second of html text;
- otherwise: object will be the only source of plain text
and html source will be set to None
If text or html source is:
- None: content part will be ignored,
- string: content part will be set to it,
- file-like object: content part will be fetched from it using
it's read() method
attachments: list or tuple of Mail.Attachment objects; will also
accept single object
cc: list or tuple of carbon copy receiver addresses; will also
accept single object
bcc: list or tuple of blind carbon copy receiver addresses; will
also accept single object
reply_to: address to which reply should be composed
encoding: encoding of all strings passed to this method (including
message bodies)
headers: dictionary of headers to refine the headers just before
sending mail, e.g. `{'X-Mailer' : 'web2py mailer'}`
from_address: address to appear in the 'From:' header, this is not
the envelope sender. If not specified the sender will be used
cipher_type :
gpg - need a python-pyme package and gpgme lib
x509 - smime
gpg_home : you can set a GNUPGHOME environment variable
to specify home of gnupg
sign : sign the message (True or False)
sign_passphrase : passphrase for key signing
encrypt : encrypt the message (True or False). It defaults to True.
... x509 only ...
x509_sign_keyfile : the signers private key filename or
string containing the key. (PEM format)
x509_sign_certfile: the signers certificate filename or
string containing the cert. (PEM format)
x509_sign_chainfile: sets the optional all-in-one file where you
can assemble the certificates of Certification
Authorities (CA) which form the certificate
chain of email certificate. It can be a
string containing the certs to. (PEM format)
x509_nocerts : if True then no attached certificate in mail
            x509_crypt_certfiles: the certificates to encrypt the messages
                with; can be a file name / string or a list of
                file names / strings (PEM format)
Examples:
Send plain text message to single address::
mail.send('you@example.com',
'Message subject',
'Plain text body of the message')
Send html message to single address::
mail.send('you@example.com',
'Message subject',
                      '<html>html body of the message</html>')
Send text and html message to three addresses (two in cc)::
mail.send('you@example.com',
'Message subject',
('Plain text body', '<html>html body</html>'),
cc=['other1@example.com', 'other2@example.com'])
Send html only message with image attachment available from the
message by 'photo' content id::
mail.send('you@example.com',
'Message subject',
(None, '<html><img src="cid:photo" /></html>'),
                      Mail.Attachment('/path/to/photo.jpg',
                                      content_id='photo'))
Send email with two attachments and no body text::
            mail.send('you@example.com',
'Message subject',
None,
[Mail.Attachment('/path/to/fist.file'),
Mail.Attachment('/path/to/second.file')])
        Returns:
            True on success, False on failure.
        Before returning, the method updates two of the object's fields:
            - self.result: return value of smtplib.SMTP.sendmail() or GAE's
              mail.send_mail() method
            - self.error: Exception message or None if the above was successful
        """
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
def encode_header(key):
if [c for c in key if 32 > ord(c) or ord(c) > 127]:
return Header.Header(key.encode('utf-8'), 'utf-8')
else:
return key
# encoded or raw text
def encoded_or_raw(text):
if raw:
text = encode_header(text)
return text
sender = sender or self.settings.sender
if not isinstance(self.settings.server, str):
raise Exception('Server address not specified')
if not isinstance(sender, str):
raise Exception('Sender address not specified')
if not raw and attachments:
# Use multipart/mixed if there is attachments
payload_in = MIMEMultipart.MIMEMultipart('mixed')
elif raw:
# no encoding configuration for raw messages
if not isinstance(message, basestring):
message = message.read()
if isinstance(message, unicode):
text = message.encode('utf-8')
elif not encoding == 'utf-8':
text = message.decode(encoding).encode('utf-8')
else:
text = message
# No charset passed to avoid transport encoding
# NOTE: some unicode encoded strings will produce
# unreadable mail contents.
payload_in = MIMEText.MIMEText(text)
if to:
if not isinstance(to, (list, tuple)):
to = [to]
else:
raise Exception('Target receiver address not specified')
if cc:
if not isinstance(cc, (list, tuple)):
cc = [cc]
if bcc:
if not isinstance(bcc, (list, tuple)):
bcc = [bcc]
if message is None:
text = html = None
elif isinstance(message, (list, tuple)):
text, html = message
elif message.strip().startswith('<html') and \
message.strip().endswith('</html>'):
text = self.settings.server == 'gae' and message or None
html = message
else:
text = message
html = None
if (not text is None or not html is None) and (not raw):
if not text is None:
if not isinstance(text, basestring):
text = text.read()
if isinstance(text, unicode):
text = text.encode('utf-8')
elif not encoding == 'utf-8':
text = text.decode(encoding).encode('utf-8')
if not html is None:
if not isinstance(html, basestring):
html = html.read()
if isinstance(html, unicode):
html = html.encode('utf-8')
elif not encoding == 'utf-8':
html = html.decode(encoding).encode('utf-8')
# Construct mime part only if needed
if text is not None and html:
# We have text and html we need multipart/alternative
attachment = MIMEMultipart.MIMEMultipart('alternative')
attachment.attach(MIMEText.MIMEText(text, _charset='utf-8'))
attachment.attach(
MIMEText.MIMEText(html, 'html', _charset='utf-8'))
elif text is not None:
attachment = MIMEText.MIMEText(text, _charset='utf-8')
elif html:
attachment = \
MIMEText.MIMEText(html, 'html', _charset='utf-8')
if attachments:
# If there is attachments put text and html into
# multipart/mixed
payload_in.attach(attachment)
else:
# No attachments no multipart/mixed
payload_in = attachment
if (attachments is None) or raw:
pass
elif isinstance(attachments, (list, tuple)):
for attachment in attachments:
payload_in.attach(attachment)
else:
payload_in.attach(attachments)
#######################################################
# CIPHER #
#######################################################
cipher_type = cipher_type or self.settings.cipher_type
sign = sign if sign != None else self.settings.sign
sign_passphrase = sign_passphrase or self.settings.sign_passphrase
encrypt = encrypt if encrypt != None else self.settings.encrypt
#######################################################
# GPGME #
#######################################################
if cipher_type == 'gpg':
if self.settings.gpg_home:
# Set GNUPGHOME environment variable to set home of gnupg
import os
os.environ['GNUPGHOME'] = self.settings.gpg_home
if not sign and not encrypt:
self.error = "No sign and no encrypt is set but cipher type to gpg"
return False
# need a python-pyme package and gpgme lib
from pyme import core, errors
from pyme.constants.sig import mode
############################################
# sign #
############################################
if sign:
import string
core.check_version(None)
pin = string.replace(payload_in.as_string(), '\n', '\r\n')
plain = core.Data(pin)
sig = core.Data()
c = core.Context()
c.set_armor(1)
c.signers_clear()
# search for signing key for From:
for sigkey in c.op_keylist_all(sender, 1):
if sigkey.can_sign:
c.signers_add(sigkey)
if not c.signers_enum(0):
self.error = 'No key for signing [%s]' % sender
return False
c.set_passphrase_cb(lambda x, y, z: sign_passphrase)
try:
# make a signature
c.op_sign(plain, sig, mode.DETACH)
sig.seek(0, 0)
# make it part of the email
payload = MIMEMultipart.MIMEMultipart('signed',
boundary=None,
_subparts=None,
**dict(
micalg="pgp-sha1",
protocol="application/pgp-signature"))
# insert the origin payload
payload.attach(payload_in)
# insert the detached signature
p = MIMEBase.MIMEBase("application", 'pgp-signature')
p.set_payload(sig.read())
payload.attach(p)
# it's just a trick to handle the no encryption case
payload_in = payload
except errors.GPGMEError, ex:
self.error = "GPG error: %s" % ex.getstring()
return False
# ENCRYPT
if encrypt:
try:
sk = X509.X509_Stack()
if not isinstance(x509_crypt_certfiles, (list, tuple)):
x509_crypt_certfiles = [x509_crypt_certfiles]
# make an encryption cert's stack
for crypt_certfile in x509_crypt_certfiles:
certfile = X509.load_cert(crypt_certfile)\
if os.path.isfile(crypt_certfile)\
else X509.load_cert_string(crypt_certfile)
sk.push(certfile)
s.set_x509_stack(sk)
s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
tmp_bio = BIO.MemoryBuffer()
if sign:
s.write(tmp_bio, p7)
else:
tmp_bio.write(payload_in.as_string())
p7 = s.encrypt(tmp_bio)
except Exception, e:
self.error = "Something went wrong on encrypting: <%s>" % str(e)
return False
out = BIO.MemoryBuffer()
if encrypt:
s.write(out, p7)
else:
if sign:
s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED)
else:
out.write('\r\n')
out.write(payload_in.as_string())
out.close()
st = str(out.read())
payload = message_from_string(st)
else:
payload = payload_in
if from_address:
payload['From'] = encoded_or_raw(from_address.decode(encoding))
else:
payload['From'] = encoded_or_raw(sender.decode(encoding))
origTo = to[:]
if to:
payload['To'] = encoded_or_raw(', '.join(to).decode(encoding))
if reply_to:
payload['Reply-To'] = encoded_or_raw(reply_to.decode(encoding))
if cc:
payload['Cc'] = encoded_or_raw(', '.join(cc).decode(encoding))
to.extend(cc)
if bcc:
to.extend(bcc)
payload['Subject'] = encoded_or_raw(subject.decode(encoding))
payload['Date'] = email.utils.formatdate()
for k, v in headers.iteritems():
payload[k] = encoded_or_raw(v.decode(encoding))
result = {}
try:
if self.settings.server == 'logging':
logger.warn('email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' %
('-' * 40, sender,
', '.join(to), subject,
text or html, '-' * 40))
elif self.settings.server == 'gae':
xcc = dict()
if cc:
xcc['cc'] = cc
if bcc:
xcc['bcc'] = bcc
if reply_to:
xcc['reply_to'] = reply_to
from google.appengine.api import mail
attachments = attachments and [mail.Attachment(
a.my_filename,
a.my_payload,
                content_id='<attachment-%s>' % k
) for k,a in enumerate(attachments) if not raw]
if attachments:
result = mail.send_mail(
sender=sender, to=origTo,
subject=subject, body=text, html=html,
attachments=attachments, **xcc)
elif html and (not raw):
result = mail.send_mail(
sender=sender, to=origTo,
subject=subject, body=text, html=html, **xcc)
else:
result = mail.send_mail(
sender=sender, to=origTo,
subject=subject, body=text, **xcc)
else:
smtp_args = self.settings.server.split(':')
kwargs = dict(timeout=self.settings.timeout)
if self.settings.ssl:
server = smtplib.SMTP_SSL(*smtp_args, **kwargs)
else:
server = smtplib.SMTP(*smtp_args, **kwargs)
if self.settings.tls and not self.settings.ssl:
server.ehlo(self.settings.hostname)
server.starttls()
server.ehlo(self.settings.hostname)
if self.settings.login:
server.login(*self.settings.login.split(':', 1))
result = server.sendmail(
sender, to, payload.as_string())
server.quit()
except Exception, e:
logger.warn('Mail.send failure:%s' % e)
self.result = result
self.error = e
return False
self.result = result
self.error = None
return True
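# A minimal usage sketch of the Mail class above (illustration only; the
# server, credentials, addresses and file path are placeholder assumptions).
# Kept commented out so importing this module has no side effects:
#
#     mail = Mail(server='smtp.example.com:587',
#                 sender='you@example.com',
#                 login='username:password',
#                 tls=True)
#     ok = mail.send(to=['recipient@example.com'],
#                    subject='Weekly report',
#                    message=('plain text body', '<html>html body</html>'),
#                    attachments=[Mail.Attachment('/path/to/report.pdf')])
#     if not ok:
#         print mail.error  # set by send() on failure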
class Recaptcha(DIV):
"""
Examples:
Use as::
form = FORM(Recaptcha(public_key='...',private_key='...'))
or::
form = SQLFORM(...)
form.append(Recaptcha(public_key='...',private_key='...'))
"""
API_SSL_SERVER = 'https://www.google.com/recaptcha/api'
API_SERVER = 'http://www.google.com/recaptcha/api'
VERIFY_SERVER = 'http://www.google.com/recaptcha/api/verify'
def __init__(self,
request=None,
public_key='',
private_key='',
use_ssl=False,
error=None,
error_message='invalid',
label='Verify:',
options='',
comment='',
ajax=False
):
request = request or current.request
self.request_vars = request and request.vars or current.request.vars
self.remote_addr = request.env.remote_addr
self.public_key = public_key
self.private_key = private_key
self.use_ssl = use_ssl
self.error = error
self.errors = Storage()
self.error_message = error_message
self.components = []
self.attributes = {}
self.label = label
self.options = options
self.comment = comment
self.ajax = ajax
def _validate(self):
recaptcha_challenge_field = \
self.request_vars.recaptcha_challenge_field
recaptcha_response_field = \
self.request_vars.recaptcha_response_field
private_key = self.private_key
remoteip = self.remote_addr
if not (recaptcha_response_field and recaptcha_challenge_field
and len(recaptcha_response_field)
and len(recaptcha_challenge_field)):
self.errors['captcha'] = self.error_message
return False
params = urllib.urlencode({
'privatekey': private_key,
'remoteip': remoteip,
'challenge': recaptcha_challenge_field,
'response': recaptcha_response_field,
})
request = urllib2.Request(
url=self.VERIFY_SERVER,
data=params,
headers={'Content-type': 'application/x-www-form-urlencoded',
'User-agent': 'reCAPTCHA Python'})
httpresp = urllib2.urlopen(request)
return_values = httpresp.read().splitlines()
httpresp.close()
return_code = return_values[0]
if return_code == 'true':
del self.request_vars.recaptcha_challenge_field
del self.request_vars.recaptcha_response_field
self.request_vars.captcha = ''
return True
else:
self.error = return_values[1]
self.errors['captcha'] = self.error_message
return False
def xml(self):
public_key = self.public_key
use_ssl = self.use_ssl
error_param = ''
if self.error:
error_param = '&error=%s' % self.error
if use_ssl:
server = self.API_SSL_SERVER
else:
server = self.API_SERVER
if not self.ajax:
captcha = DIV(
SCRIPT("var RecaptchaOptions = {%s};" % self.options),
SCRIPT(_type="text/javascript",
_src="%s/challenge?k=%s%s" % (server, public_key, error_param)),
TAG.noscript(
IFRAME(
_src="%s/noscript?k=%s%s" % (
server, public_key, error_param),
_height="300", _width="500", _frameborder="0"), BR(),
INPUT(
_type='hidden', _name='recaptcha_response_field',
_value='manual_challenge')), _id='recaptcha')
else:
url_recaptcha_js = "%s/js/recaptcha_ajax.js" % server
RecaptchaOptions = "var RecaptchaOptions = {%s}" % self.options
script = """%(options)s;
jQuery.getScript('%(url)s',function() {
Recaptcha.create('%(public_key)s',
'recaptcha',jQuery.extend(RecaptchaOptions,{'callback':Recaptcha.focus_response_field}))
}) """ % ({'options': RecaptchaOptions, 'url': url_recaptcha_js, 'public_key': public_key})
captcha = DIV(
SCRIPT(
script,
_type="text/javascript",
),
TAG.noscript(
IFRAME(
_src="%s/noscript?k=%s%s" % (
server, public_key, error_param),
_height="300", _width="500", _frameborder="0"), BR(),
INPUT(
_type='hidden', _name='recaptcha_response_field',
_value='manual_challenge')), _id='recaptcha')
if not self.errors.captcha:
return XML(captcha).xml()
else:
captcha.append(DIV(self.errors['captcha'], _class='error'))
return XML(captcha).xml()
# this should only be used for captcha and perhaps not even for that
def addrow(form, a, b, c, style, _id, position=-1):
if style == "divs":
form[0].insert(position, DIV(DIV(LABEL(a), _class='w2p_fl'),
DIV(b, _class='w2p_fw'),
DIV(c, _class='w2p_fc'),
_id=_id))
elif style == "table2cols":
form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
TD(c, _class='w2p_fc')))
form[0].insert(position + 1, TR(TD(b, _class='w2p_fw'),
_colspan=2, _id=_id))
elif style == "ul":
form[0].insert(position, LI(DIV(LABEL(a), _class='w2p_fl'),
DIV(b, _class='w2p_fw'),
DIV(c, _class='w2p_fc'),
_id=_id))
elif style == "bootstrap":
form[0].insert(position, DIV(LABEL(a, _class='control-label'),
DIV(b, SPAN(c, _class='inline-help'),
_class='controls'),
_class='control-group', _id=_id))
else:
form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
TD(b, _class='w2p_fw'),
TD(c, _class='w2p_fc'), _id=_id))
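# Illustrative sketch (not part of the module): addrow is how Auth injects an
# extra row, e.g. a captcha, into an already-built SQLFORM:
#
#     addrow(form, captcha.label, captcha, captcha.comment,
#            settings.formstyle, 'captcha__row')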
class Auth(object):
default_settings = dict(
hideerror=False,
password_min_length=4,
cas_maps=None,
reset_password_requires_verification=False,
registration_requires_verification=False,
registration_requires_approval=False,
login_after_registration=False,
login_after_password_change=True,
alternate_requires_registration=False,
create_user_groups="user_%(id)s",
everybody_group_id=None,
manager_actions={},
auth_manager_role=None,
two_factor_authentication_group = None,
login_captcha=None,
register_captcha=None,
pre_registration_div=None,
retrieve_username_captcha=None,
retrieve_password_captcha=None,
captcha=None,
prevent_open_redirect_attacks=True,
prevent_password_reset_attacks=True,
expiration=3600, # one hour
long_expiration=3600 * 30 * 24, # one month
remember_me_form=True,
allow_basic_login=False,
allow_basic_login_only=False,
on_failed_authentication=lambda x: redirect(x),
formstyle=None,
label_separator=None,
logging_enabled = True,
allow_delete_accounts=False,
password_field='password',
table_user_name='auth_user',
table_group_name='auth_group',
table_membership_name='auth_membership',
table_permission_name='auth_permission',
table_event_name='auth_event',
table_cas_name='auth_cas',
table_user=None,
table_group=None,
table_membership=None,
table_permission=None,
table_event=None,
table_cas=None,
showid=False,
use_username=False,
login_email_validate=True,
login_userfield=None,
multi_login=False,
logout_onlogout=None,
register_fields=None,
register_verify_password=True,
profile_fields=None,
email_case_sensitive=True,
username_case_sensitive=True,
update_fields=['email'],
ondelete="CASCADE",
client_side=True,
renew_session_onlogin=True,
renew_session_onlogout=True,
keep_session_onlogin=True,
keep_session_onlogout=False,
wiki=Settings(),
)
# ## these are messages that can be customized
default_messages = dict(
login_button='Log In',
register_button='Sign Up',
password_reset_button='Request reset password',
password_change_button='Change password',
profile_save_button='Apply changes',
submit_button='Submit',
verify_password='Verify Password',
delete_label='Check to delete',
function_disabled='Function disabled',
access_denied='Insufficient privileges',
registration_verifying='Registration needs verification',
registration_pending='Registration is pending approval',
email_taken='This email already has an account',
invalid_username='Invalid username',
username_taken='Username already taken',
login_disabled='Login disabled by administrator',
logged_in='Logged in',
email_sent='Email sent',
unable_to_send_email='Unable to send email',
email_verified='Email verified',
logged_out='Logged out',
registration_successful='Registration successful',
invalid_email='Invalid email',
unable_send_email='Unable to send email',
invalid_login='Invalid login',
invalid_user='Invalid user',
invalid_password='Invalid password',
is_empty="Cannot be empty",
mismatched_password="Password fields don't match",
verify_email='Welcome %(username)s! Click on the link %(link)s to verify your email',
verify_email_subject='Email verification',
username_sent='Your username was emailed to you',
new_password_sent='A new password was emailed to you',
password_changed='Password changed',
retrieve_username='Your username is: %(username)s',
retrieve_username_subject='Username retrieve',
retrieve_password='Your password is: %(password)s',
retrieve_password_subject='Password retrieve',
reset_password='Click on the link %(link)s to reset your password',
reset_password_subject='Password reset',
invalid_reset_password='Invalid reset password',
profile_updated='Profile updated',
new_password='New password',
old_password='Old password',
group_description='Group uniquely assigned to user %(id)s',
register_log='User %(id)s Registered',
login_log='User %(id)s Logged-in',
login_failed_log=None,
logout_log='User %(id)s Logged-out',
profile_log='User %(id)s Profile updated',
verify_email_log='User %(id)s Verification email sent',
retrieve_username_log='User %(id)s Username retrieved',
retrieve_password_log='User %(id)s Password retrieved',
reset_password_log='User %(id)s Password reset',
change_password_log='User %(id)s Password changed',
add_group_log='Group %(group_id)s created',
del_group_log='Group %(group_id)s deleted',
add_membership_log=None,
del_membership_log=None,
has_membership_log=None,
add_permission_log=None,
del_permission_log=None,
has_permission_log=None,
impersonate_log='User %(id)s is impersonating %(other_id)s',
label_first_name='First name',
label_last_name='Last name',
label_username='Username',
label_email='E-mail',
label_password='Password',
label_registration_key='Registration key',
label_reset_password_key='Reset Password key',
label_registration_id='Registration identifier',
label_role='Role',
label_description='Description',
label_user_id='User ID',
label_group_id='Group ID',
label_name='Name',
label_table_name='Object or table name',
label_record_id='Record ID',
label_time_stamp='Timestamp',
label_client_ip='Client IP',
label_origin='Origin',
label_remember_me="Remember me (for 30 days)",
verify_password_comment='please input your password again',
)
"""
Class for authentication, authorization, role based access control.
Includes:
- registration and profile
- login and logout
- username and password retrieval
- event logging
- role creation and assignment
- user defined group/role based permission
Args:
environment: is there for legacy but unused (awful)
db: has to be the database where to create tables for authentication
mailer: `Mail(...)` or None (no mailer) or True (make a mailer)
hmac_key: can be a hmac_key or hmac_key=Auth.get_or_create_key()
controller: (where is the user action?)
cas_provider: (delegate authentication to the URL, CAS2)
Authentication Example::
from gluon.contrib.utils import *
mail=Mail()
mail.settings.server='smtp.gmail.com:587'
mail.settings.sender='you@somewhere.com'
mail.settings.login='username:password'
auth=Auth(db)
auth.settings.mailer=mail
# auth.settings....=...
auth.define_tables()
def authentication():
return dict(form=auth())
Exposes:
- `http://.../{application}/{controller}/authentication/login`
- `http://.../{application}/{controller}/authentication/logout`
- `http://.../{application}/{controller}/authentication/register`
- `http://.../{application}/{controller}/authentication/verify_email`
- `http://.../{application}/{controller}/authentication/retrieve_username`
- `http://.../{application}/{controller}/authentication/retrieve_password`
- `http://.../{application}/{controller}/authentication/reset_password`
- `http://.../{application}/{controller}/authentication/profile`
- `http://.../{application}/{controller}/authentication/change_password`
On registration a group with role=new_user.id is created
and user is given membership of this group.
You can create a group with::
group_id=auth.add_group('Manager', 'can access the manage action')
auth.add_permission(group_id, 'access to manage')
Here "access to manage" is just a user defined string.
You can give access to a user::
auth.add_membership(group_id, user_id)
If user id is omitted, the logged in user is assumed
Then you can decorate any action::
@auth.requires_permission('access to manage')
def manage():
return dict()
You can restrict a permission to a specific table::
auth.add_permission(group_id, 'edit', db.sometable)
@auth.requires_permission('edit', db.sometable)
Or to a specific record::
auth.add_permission(group_id, 'edit', db.sometable, 45)
@auth.requires_permission('edit', db.sometable, 45)
If authorization is not granted calls::
auth.settings.on_failed_authorization
Other options::
auth.settings.mailer=None
auth.settings.expiration=3600 # seconds
...
### these are messages that can be customized
...
"""
@staticmethod
def get_or_create_key(filename=None, alg='sha512'):
request = current.request
if not filename:
filename = os.path.join(request.folder, 'private', 'auth.key')
if os.path.exists(filename):
key = open(filename, 'r').read().strip()
else:
key = alg + ':' + web2py_uuid()
open(filename, 'w').write(key)
return key
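    # Illustrative sketch (not part of the module): typical wiring of
    # get_or_create_key, assuming db is an existing DAL instance. The key is
    # persisted in private/auth.key so password hashes stay stable across
    # restarts:
    #
    #     auth = Auth(db, hmac_key=Auth.get_or_create_key())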
def url(self, f=None, args=None, vars=None, scheme=False):
if args is None:
args = []
if vars is None:
vars = {}
return URL(c=self.settings.controller,
f=f, args=args, vars=vars, scheme=scheme)
def here(self):
return URL(args=current.request.args, vars=current.request.get_vars)
def __init__(self, environment=None, db=None, mailer=True,
hmac_key=None, controller='default', function='user',
cas_provider=None, signature=True, secure=False,
csrf_prevention=True, propagate_extension=None,
url_index=None):
        # for backward compatibility: accept a DAL as first positional argument
        if not db and environment and isinstance(environment, DAL):
db = environment
self.db = db
self.environment = current
self.csrf_prevention = csrf_prevention
request = current.request
session = current.session
auth = session.auth
self.user_groups = auth and auth.user_groups or {}
if secure:
request.requires_https()
now = request.now
if auth:
delta = datetime.timedelta(days=0, seconds=auth.expiration)
if auth.last_visit and auth.last_visit + delta > now:
self.user = auth.user
if (now - auth.last_visit).seconds > (auth.expiration / 10):
auth.last_visit = request.now
else:
self.user = None
if session.auth:
del session.auth
session.renew(clear_session=True)
else:
self.user = None
if session.auth:
del session.auth
        url_index = url_index or URL(controller, 'index')
        url_login = URL(controller, function, args='login',
                        extension=propagate_extension)
        settings = self.settings = Settings()
        settings.update(Auth.default_settings)
settings.update(
cas_domains=[request.env.http_host],
cas_provider=cas_provider,
cas_actions=dict(login='login',
validate='validate',
servicevalidate='serviceValidate',
proxyvalidate='proxyValidate',
logout='logout'),
extra_fields={},
actions_disabled=[],
controller=controller,
function=function,
login_url=url_login,
logged_url=URL(controller, function, args='profile'),
download_url=URL(controller, 'download'),
mailer=(mailer is True) and Mail() or mailer,
on_failed_authorization =
URL(controller, function, args='not_authorized'),
login_next = url_index,
login_onvalidation = [],
login_onaccept = [],
login_onfail = [],
login_methods = [self],
login_form = self,
logout_next = url_index,
logout_onlogout = None,
register_next = url_index,
register_onvalidation = [],
register_onaccept = [],
verify_email_next = url_login,
verify_email_onaccept = [],
profile_next = url_index,
profile_onvalidation = [],
profile_onaccept = [],
retrieve_username_next = url_index,
retrieve_password_next = url_index,
request_reset_password_next = url_login,
reset_password_next = url_index,
change_password_next = url_index,
change_password_onvalidation = [],
change_password_onaccept = [],
retrieve_password_onvalidation = [],
reset_password_onvalidation = [],
reset_password_onaccept = [],
hmac_key = hmac_key,
formstyle = current.response.formstyle,
label_separator = current.response.form_label_separator
)
settings.lock_keys = True
        messages = self.messages = Messages(current.T)
        messages.update(Auth.default_messages)
messages.update(ajax_failed_authentication=
DIV(H4('NOT AUTHORIZED'),
'Please ',
A('login',
_href=self.settings.login_url +
('?_next=' + urllib.quote(current.request.env.http_web2py_component_location))
if current.request.env.http_web2py_component_location else ''),
' to view this content.',
_class='not-authorized alert alert-block'))
messages.lock_keys = True
response = current.response
if auth and auth.remember_me:
response.session_cookie_expires = auth.expiration
if signature:
self.define_signature()
else:
self.signature = None
def get_vars_next(self):
next = current.request.vars._next
if isinstance(next, (list, tuple)):
next = next[0]
return next
def _get_user_id(self):
"""accessor for auth.user_id"""
return self.user and self.user.id or None
user_id = property(_get_user_id, doc="user.id or None")
def table_user(self):
return self.db[self.settings.table_user_name]
def table_group(self):
return self.db[self.settings.table_group_name]
def table_membership(self):
return self.db[self.settings.table_membership_name]
def table_permission(self):
return self.db[self.settings.table_permission_name]
def table_event(self):
return self.db[self.settings.table_event_name]
def table_cas(self):
return self.db[self.settings.table_cas_name]
def _HTTP(self, *a, **b):
"""
only used in lambda: self._HTTP(404)
"""
raise HTTP(*a, **b)
def __call__(self):
"""
Example:
Use as::
def authentication():
return dict(form=auth())
"""
request = current.request
args = request.args
if not args:
redirect(self.url(args='login', vars=request.vars))
elif args[0] in self.settings.actions_disabled:
raise HTTP(404)
if args[0] in ('login', 'logout', 'register', 'verify_email',
'retrieve_username', 'retrieve_password',
'reset_password', 'request_reset_password',
'change_password', 'profile', 'groups',
'impersonate', 'not_authorized'):
if len(request.args) >= 2 and args[0] == 'impersonate':
return getattr(self, args[0])(request.args[1])
else:
return getattr(self, args[0])()
elif args[0] == 'cas' and not self.settings.cas_provider:
if args(1) == self.settings.cas_actions['login']:
return self.cas_login(version=2)
elif args(1) == self.settings.cas_actions['validate']:
return self.cas_validate(version=1)
elif args(1) == self.settings.cas_actions['servicevalidate']:
return self.cas_validate(version=2, proxy=False)
elif args(1) == self.settings.cas_actions['proxyvalidate']:
return self.cas_validate(version=2, proxy=True)
elif args(1) == self.settings.cas_actions['logout']:
return self.logout(next=request.vars.service or DEFAULT)
else:
raise HTTP(404)
def navbar(self, prefix='Welcome', action=None,
separators=(' [ ', ' | ', ' ] '), user_identifier=DEFAULT,
referrer_actions=DEFAULT, mode='default'):
""" Navbar with support for more templates
This uses some code from the old navbar.
Args:
mode: see options for list of
"""
items = []
self.bar = ''
T = current.T
referrer_actions = [] if not referrer_actions else referrer_actions
if not action:
action = self.url(self.settings.function)
request = current.request
if URL() == action:
next = ''
else:
next = '?_next=' + urllib.quote(URL(args=request.args,
vars=request.get_vars))
href = lambda function: '%s/%s%s' % (action, function, next
if referrer_actions is DEFAULT
or function in referrer_actions
else '')
if isinstance(prefix, str):
prefix = T(prefix)
if prefix:
prefix = prefix.strip() + ' '
def Anr(*a, **b):
b['_rel'] = 'nofollow'
return A(*a, **b)
if self.user_id:
logout_next = self.settings.logout_next
items.append({'name': T('Log Out'),
'href': '%s/logout?_next=%s' % (action,
urllib.quote(
logout_next)),
'icon': 'icon-off'})
if not 'profile' in self.settings.actions_disabled:
items.append({'name': T('Profile'), 'href': href('profile'),
'icon': 'icon-user'})
if not 'change_password' in self.settings.actions_disabled:
items.append({'name': T('Password'),
'href': href('change_password'),
'icon': 'icon-lock'})
if user_identifier is DEFAULT:
user_identifier = '%(first_name)s'
if callable(user_identifier):
user_identifier = user_identifier(self.user)
elif ((isinstance(user_identifier, str) or
type(user_identifier).__name__ == 'lazyT') and
re.search(r'%\(.+\)s', user_identifier)):
user_identifier = user_identifier % self.user
if not user_identifier:
user_identifier = ''
else:
items.append({'name': T('Log In'), 'href': href('login'),
'icon': 'icon-off'})
if not 'register' in self.settings.actions_disabled:
items.append({'name': T('Sign Up'), 'href': href('register'),
'icon': 'icon-user'})
if not 'request_reset_password' in self.settings.actions_disabled:
items.append({'name': T('Lost password?'),
'href': href('request_reset_password'),
'icon': 'icon-lock'})
if (self.settings.use_username and not
'retrieve_username' in self.settings.actions_disabled):
items.append({'name': T('Forgot username?'),
'href': href('retrieve_username'),
'icon': 'icon-edit'})
def menu():
self.bar = [(items[0]['name'], False, items[0]['href'], [])]
del items[0]
for item in items:
self.bar[0][3].append((item['name'], False, item['href']))
def bootstrap3():
def rename(icon): return icon+' '+icon.replace('icon', 'glyphicon')
self.bar = UL(LI(Anr(I(_class=rename('icon '+items[0]['icon'])),
' ' + items[0]['name'],
_href=items[0]['href'])), _class='dropdown-menu')
del items[0]
for item in items:
self.bar.insert(-1, LI(Anr(I(_class=rename('icon '+item['icon'])),
' ' + item['name'],
_href=item['href'])))
self.bar.insert(-1, LI('', _class='divider'))
if self.user_id:
self.bar = LI(Anr(prefix, user_identifier,
_href='#', _class="dropdown-toggle",
data={'toggle': 'dropdown'}),
self.bar, _class='dropdown')
else:
self.bar = LI(Anr(T('Log In'),
_href='#', _class="dropdown-toggle",
data={'toggle': 'dropdown'}), self.bar,
_class='dropdown')
def bare():
""" In order to do advanced customization we only need the
prefix, the user_identifier and the href attribute of items
Examples:
Use as::
# in module custom_layout.py
from gluon import *
def navbar(auth_navbar):
bar = auth_navbar
user = bar["user"]
if not user:
btn_login = A(current.T("Login"),
_href=bar["login"],
_class="btn btn-success",
_rel="nofollow")
btn_register = A(current.T("Sign up"),
_href=bar["register"],
_class="btn btn-primary",
_rel="nofollow")
return DIV(btn_register, btn_login, _class="btn-group")
else:
toggletext = "%s back %s" % (bar["prefix"], user)
toggle = A(toggletext,
_href="#",
_class="dropdown-toggle",
_rel="nofollow",
**{"_data-toggle": "dropdown"})
li_profile = LI(A(I(_class="icon-user"), ' ',
current.T("Account details"),
_href=bar["profile"], _rel="nofollow"))
li_custom = LI(A(I(_class="icon-book"), ' ',
current.T("My Agenda"),
_href="#", rel="nofollow"))
li_logout = LI(A(I(_class="icon-off"), ' ',
current.T("logout"),
_href=bar["logout"], _rel="nofollow"))
dropdown = UL(li_profile,
li_custom,
LI('', _class="divider"),
li_logout,
_class="dropdown-menu", _role="menu")
return LI(toggle, dropdown, _class="dropdown")
# in models db.py
import custom_layout as custom
# in layout.html
<ul id="navbar" class="nav pull-right">
{{='auth' in globals() and \
custom.navbar(auth.navbar(mode='bare')) or ''}}</ul>
"""
bare = {}
bare['prefix'] = prefix
bare['user'] = user_identifier if self.user_id else None
for i in items:
if i['name'] == T('Log In'):
k = 'login'
elif i['name'] == T('Sign Up'):
k = 'register'
elif i['name'] == T('Lost password?'):
k = 'request_reset_password'
elif i['name'] == T('Forgot username?'):
k = 'retrieve_username'
elif i['name'] == T('Log Out'):
k = 'logout'
elif i['name'] == T('Profile'):
k = 'profile'
elif i['name'] == T('Password'):
k = 'change_password'
bare[k] = i['href']
self.bar = bare
options = {'asmenu': menu,
'dropdown': bootstrap3,
'bare': bare
}
if mode in options and callable(options[mode]):
options[mode]()
else:
s1, s2, s3 = separators
if self.user_id:
self.bar = SPAN(prefix, user_identifier, s1,
Anr(items[0]['name'],
_href=items[0]['href']), s3,
_class='auth_navbar')
else:
self.bar = SPAN(s1, Anr(items[0]['name'],
_href=items[0]['href']), s3,
_class='auth_navbar')
for item in items[1:]:
self.bar.insert(-1, s2)
self.bar.insert(-1, Anr(item['name'], _href=item['href']))
return self.bar
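    # Illustrative sketch (not part of the module): rendering the navbar in a
    # layout view, assuming auth is defined in a model file:
    #
    #     {{='auth' in globals() and auth.navbar(mode='dropdown') or ''}}
    #
    # mode='bare' returns a dict of hrefs instead, for fully custom layouts,
    # as shown in the bare() docstring above.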
def __get_migrate(self, tablename, migrate=True):
if type(migrate).__name__ == 'str':
return (migrate + tablename + '.table')
elif migrate == False:
return False
else:
return True
def enable_record_versioning(self,
tables,
archive_db=None,
archive_names='%(tablename)s_archive',
current_record='current_record',
current_record_label=None):
"""
Used to enable full record versioning (including auth tables)::
auth = Auth(db)
auth.define_tables(signature=True)
# define our own tables
db.define_table('mything',Field('name'),auth.signature)
auth.enable_record_versioning(tables=db)
        tables can be the db (all tables) or a list of tables.
        Only tables with modified_by and modified_on fields (as created
        by auth.signature) will have versioning. Old record versions will be
        stored in the automatically defined table 'mything_archive'.
        When record versioning is enabled, records are never
        deleted but marked with is_active=False.
        enable_record_versioning enables a common_filter for
        every table that filters out records with is_active = False
Note:
If you use auth.enable_record_versioning,
do not use auth.archive or you will end up with duplicates.
auth.archive does explicitly what enable_record_versioning
does automatically.
"""
current_record_label = current_record_label or current.T(
current_record.replace('_', ' ').title())
for table in tables:
fieldnames = table.fields()
if ('id' in fieldnames and
'modified_on' in fieldnames and
not current_record in fieldnames):
table._enable_record_versioning(
archive_db=archive_db,
archive_name=archive_names,
current_record=current_record,
current_record_label=current_record_label)
def define_signature(self):
db = self.db
settings = self.settings
request = current.request
T = current.T
reference_user = 'reference %s' % settings.table_user_name
def lazy_user(auth=self):
return auth.user_id
def represent(id, record=None, s=settings):
try:
user = s.table_user(id)
return '%s %s' % (user.get("first_name", user.get("email")),
user.get("last_name", ''))
except:
return id
ondelete = self.settings.ondelete
self.signature = Table(
self.db, 'auth_signature',
Field('is_active', 'boolean',
default=True,
readable=False, writable=False,
label=T('Is Active')),
Field('created_on', 'datetime',
default=request.now,
writable=False, readable=False,
label=T('Created On')),
Field('created_by',
reference_user,
default=lazy_user, represent=represent,
writable=False, readable=False,
label=T('Created By'), ondelete=ondelete),
Field('modified_on', 'datetime',
update=request.now, default=request.now,
writable=False, readable=False,
label=T('Modified On')),
Field('modified_by',
reference_user, represent=represent,
default=lazy_user, update=lazy_user,
writable=False, readable=False,
label=T('Modified By'), ondelete=ondelete))
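    # Illustrative sketch (not part of the module): attaching the signature
    # defined above to an application table so records carry audit fields
    # (is_active, created_on/by, modified_on/by):
    #
    #     db.define_table('mything', Field('name'), auth.signature)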
def define_tables(self, username=None, signature=None,
migrate=None, fake_migrate=None):
"""
To be called unless tables are defined manually
Examples:
Use as::
# defines all needed tables and table files
# 'myprefix_auth_user.table', ...
auth.define_tables(migrate='myprefix_')
# defines all needed tables without migration/table files
auth.define_tables(migrate=False)
"""
db = self.db
if migrate is None:
migrate = db._migrate
if fake_migrate is None:
fake_migrate = db._fake_migrate
settings = self.settings
if username is None:
username = settings.use_username
else:
settings.use_username = username
if not self.signature:
self.define_signature()
if signature == True:
signature_list = [self.signature]
elif not signature:
signature_list = []
elif isinstance(signature, Table):
signature_list = [signature]
else:
signature_list = signature
is_not_empty = IS_NOT_EMPTY(error_message=self.messages.is_empty)
is_crypted = CRYPT(key=settings.hmac_key,
min_length=settings.password_min_length)
is_unique_email = [
IS_EMAIL(error_message=self.messages.invalid_email),
IS_NOT_IN_DB(db, '%s.email' % settings.table_user_name,
error_message=self.messages.email_taken)]
if not settings.email_case_sensitive:
is_unique_email.insert(1, IS_LOWER())
if not settings.table_user_name in db.tables:
passfield = settings.password_field
extra_fields = settings.extra_fields.get(
settings.table_user_name, []) + signature_list
if username or settings.cas_provider:
is_unique_username = \
[IS_MATCH('[\w\.\-]+', strict=True,
error_message=self.messages.invalid_username),
IS_NOT_IN_DB(db, '%s.username' % settings.table_user_name,
error_message=self.messages.username_taken)]
if not settings.username_case_sensitive:
is_unique_username.insert(1, IS_LOWER())
db.define_table(
settings.table_user_name,
Field('first_name', length=128, default='',
label=self.messages.label_first_name,
requires=is_not_empty),
Field('last_name', length=128, default='',
label=self.messages.label_last_name,
requires=is_not_empty),
Field('email', length=512, default='',
label=self.messages.label_email,
requires=is_unique_email),
Field('username', length=128, default='',
label=self.messages.label_username,
requires=is_unique_username),
Field(passfield, 'password', length=512,
readable=False, label=self.messages.label_password,
requires=[is_crypted]),
Field('registration_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_key),
Field('reset_password_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_reset_password_key),
Field('registration_id', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_id),
*extra_fields,
**dict(
migrate=self.__get_migrate(settings.table_user_name,
migrate),
fake_migrate=fake_migrate,
format='%(username)s'))
else:
db.define_table(
settings.table_user_name,
Field('first_name', length=128, default='',
label=self.messages.label_first_name,
requires=is_not_empty),
Field('last_name', length=128, default='',
label=self.messages.label_last_name,
requires=is_not_empty),
Field('email', length=512, default='',
label=self.messages.label_email,
requires=is_unique_email),
Field(passfield, 'password', length=512,
readable=False, label=self.messages.label_password,
requires=[is_crypted]),
Field('registration_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_key),
Field('reset_password_key', length=512,
writable=False, readable=False, default='',
label=self.messages.label_reset_password_key),
Field('registration_id', length=512,
writable=False, readable=False, default='',
label=self.messages.label_registration_id),
*extra_fields,
**dict(
migrate=self.__get_migrate(settings.table_user_name,
migrate),
fake_migrate=fake_migrate,
format='%(first_name)s %(last_name)s (%(id)s)'))
reference_table_user = 'reference %s' % settings.table_user_name
if not settings.table_group_name in db.tables:
extra_fields = settings.extra_fields.get(
settings.table_group_name, []) + signature_list
db.define_table(
settings.table_group_name,
Field('role', length=512, default='',
label=self.messages.label_role,
requires=IS_NOT_IN_DB(db, '%s.role' % settings.table_group_name)),
Field('description', 'text',
label=self.messages.label_description),
*extra_fields,
**dict(
migrate=self.__get_migrate(
settings.table_group_name, migrate),
fake_migrate=fake_migrate,
format='%(role)s (%(id)s)'))
reference_table_group = 'reference %s' % settings.table_group_name
if not settings.table_membership_name in db.tables:
extra_fields = settings.extra_fields.get(
settings.table_membership_name, []) + signature_list
db.define_table(
settings.table_membership_name,
Field('user_id', reference_table_user,
label=self.messages.label_user_id),
Field('group_id', reference_table_group,
label=self.messages.label_group_id),
*extra_fields,
**dict(
migrate=self.__get_migrate(
settings.table_membership_name, migrate),
fake_migrate=fake_migrate))
if not settings.table_permission_name in db.tables:
extra_fields = settings.extra_fields.get(
settings.table_permission_name, []) + signature_list
db.define_table(
settings.table_permission_name,
Field('group_id', reference_table_group,
label=self.messages.label_group_id),
Field('name', default='default', length=512,
label=self.messages.label_name,
requires=is_not_empty),
Field('table_name', length=512,
label=self.messages.label_table_name),
Field('record_id', 'integer', default=0,
label=self.messages.label_record_id,
requires=IS_INT_IN_RANGE(0, 10 ** 9)),
*extra_fields,
**dict(
migrate=self.__get_migrate(
settings.table_permission_name, migrate),
fake_migrate=fake_migrate))
if not settings.table_event_name in db.tables:
db.define_table(
settings.table_event_name,
Field('time_stamp', 'datetime',
default=current.request.now,
label=self.messages.label_time_stamp),
Field('client_ip',
default=current.request.client,
label=self.messages.label_client_ip),
Field('user_id', reference_table_user, default=None,
label=self.messages.label_user_id),
Field('origin', default='auth', length=512,
label=self.messages.label_origin,
requires=is_not_empty),
Field('description', 'text', default='',
label=self.messages.label_description,
requires=is_not_empty),
*settings.extra_fields.get(settings.table_event_name, []),
**dict(
migrate=self.__get_migrate(
settings.table_event_name, migrate),
fake_migrate=fake_migrate))
now = current.request.now
if settings.cas_domains:
if not settings.table_cas_name in db.tables:
db.define_table(
settings.table_cas_name,
Field('user_id', reference_table_user, default=None,
label=self.messages.label_user_id),
Field('created_on', 'datetime', default=now),
Field('service', requires=IS_URL()),
Field('ticket'),
Field('renew', 'boolean', default=False),
*settings.extra_fields.get(settings.table_cas_name, []),
**dict(
migrate=self.__get_migrate(
settings.table_cas_name, migrate),
fake_migrate=fake_migrate))
if not db._lazy_tables:
settings.table_user = db[settings.table_user_name]
settings.table_group = db[settings.table_group_name]
settings.table_membership = db[settings.table_membership_name]
settings.table_permission = db[settings.table_permission_name]
settings.table_event = db[settings.table_event_name]
if settings.cas_domains:
settings.table_cas = db[settings.table_cas_name]
if settings.cas_provider:
settings.actions_disabled = \
['profile', 'register', 'change_password',
'request_reset_password', 'retrieve_username']
from gluon.contrib.login_methods.cas_auth import CasAuth
maps = settings.cas_maps
if not maps:
table_user = self.table_user()
maps = dict((name, lambda v, n=name: v.get(n, None)) for name in
table_user.fields if name != 'id'
and table_user[name].readable)
maps['registration_id'] = \
lambda v, p=settings.cas_provider: '%s/%s' % (p, v['user'])
actions = [settings.cas_actions['login'],
settings.cas_actions['servicevalidate'],
settings.cas_actions['logout']]
settings.login_form = CasAuth(
casversion=2,
urlbase=settings.cas_provider,
actions=actions,
maps=maps)
return self
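    # Illustrative sketch (not part of the module): typical model-file setup,
    # assuming db is an existing DAL instance:
    #
    #     auth = Auth(db)
    #     auth.define_tables(username=True, signature=True)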
def log_event(self, description, vars=None, origin='auth'):
"""
Examples:
Use as::
auth.log_event(description='this happened', origin='auth')
"""
if not self.settings.logging_enabled or not description:
return
elif self.is_logged_in():
user_id = self.user.id
else:
user_id = None
vars = vars or {}
if type(description).__name__ == 'lazyT':
description = description.m
self.table_event().insert(
description=str(description % vars),
origin=origin, user_id=user_id)
def get_or_create_user(self, keys, update_fields=['email'],
login=True, get=True):
"""
Used for alternate login methods:
            If the user exists already then its update_fields are refreshed.
            If the user doesn't yet exist, then it is created.
"""
table_user = self.table_user()
user = None
checks = []
# make a guess about who this user is
for fieldname in ['registration_id', 'username', 'email']:
if fieldname in table_user.fields() and \
keys.get(fieldname, None):
checks.append(fieldname)
value = keys[fieldname]
user = table_user(**{fieldname: value})
if user:
break
if not checks:
return None
if not 'registration_id' in keys:
keys['registration_id'] = keys[checks[0]]
# if we think we found the user but registration_id does not match,
# make new user
if 'registration_id' in checks \
and user \
and user.registration_id \
and ('registration_id' not in keys or user.registration_id != str(keys['registration_id'])):
user = None # THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER?
if user:
if not get:
# added for register_bare to avoid overwriting users
return None
update_keys = dict(registration_id=keys['registration_id'])
for key in update_fields:
if key in keys:
update_keys[key] = keys[key]
user.update_record(**update_keys)
elif checks:
if not 'first_name' in keys and 'first_name' in table_user.fields:
guess = keys.get('email', 'anonymous').split('@')[0]
keys['first_name'] = keys.get('username', guess)
user_id = table_user.insert(**table_user._filter_fields(keys))
user = table_user[user_id]
if self.settings.create_user_groups:
group_id = self.add_group(
self.settings.create_user_groups % user)
self.add_membership(group_id, user_id)
if self.settings.everybody_group_id:
self.add_membership(self.settings.everybody_group_id, user_id)
if login:
self.user = user
return user
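    # Illustrative sketch (not part of the module): how an alternate login
    # method might hand profile data to get_or_create_user; the keys match the
    # fields probed above and the values are placeholders:
    #
    #     user = auth.get_or_create_user(
    #         dict(registration_id='openid:https://example.com/id/123',
    #              email='someone@example.com',
    #              first_name='Someone'))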
def basic(self, basic_auth_realm=False):
"""
Performs basic login.
Args:
            basic_auth_realm: optional basic http authentication realm. Can be
                a str or unicode, a callable, or a boolean.
            Reads current.request.env.http_authorization
            and returns (basic_allowed, basic_accepted, user).
            If basic_auth_realm is a callable, its return value
            is used to set the basic authentication realm; if it's a string,
            its content is used instead. Otherwise the basic authentication
            realm is set to the application name.
            If basic_auth_realm is None or False (the default) the behavior
            is to skip sending any challenge.
"""
if not self.settings.allow_basic_login:
return (False, False, False)
basic = current.request.env.http_authorization
if basic_auth_realm:
            if callable(basic_auth_realm):
                # resolve the callable first, then fall through so its
                # return value is handled like any other realm value
                basic_auth_realm = basic_auth_realm()
            if isinstance(basic_auth_realm, (unicode, str)):
                basic_realm = unicode(basic_auth_realm)
            else:
                # True (or any other truthy value): default to the app name
                basic_realm = u'' + current.request.application
http_401 = HTTP(401, u'Not Authorized', **{'WWW-Authenticate': u'Basic realm="' + basic_realm + '"'})
if not basic or not basic[:6].lower() == 'basic ':
if basic_auth_realm:
raise http_401
return (True, False, False)
(username, sep, password) = base64.b64decode(basic[6:]).partition(':')
is_valid_user = sep and self.login_bare(username, password)
if not is_valid_user and basic_auth_realm:
raise http_401
return (True, True, is_valid_user)
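    # Illustrative sketch (not part of the module): enabling HTTP basic login
    # so clients can authenticate with an Authorization: Basic header, which
    # basic() parses from base64 'username:password'; names are assumptions:
    #
    #     # in a model file
    #     auth.settings.allow_basic_login = True
    #     # in a controller
    #     @auth.requires_login()
    #     def api():
    #         return dict(user=auth.user_id)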
def login_user(self, user):
"""
        Logs in the given `user = db.auth_user(id)`
"""
from gluon.settings import global_settings
if global_settings.web2py_runtime_gae:
user = Row(self.table_user()._filter_fields(user, id=True))
delattr(user, 'password')
else:
user = Row(user)
for key, value in user.items():
if callable(value) or key == 'password':
delattr(user, key)
if self.settings.renew_session_onlogin:
current.session.renew(clear_session=not self.settings.keep_session_onlogin)
current.session.auth = Storage(user=user,
last_visit=current.request.now,
expiration=self.settings.expiration,
hmac_key=web2py_uuid())
self.user = user
self.update_groups()
def _get_login_settings(self):
table_user = self.table_user()
userfield = self.settings.login_userfield or 'username' \
if 'username' in table_user.fields else 'email'
passfield = self.settings.password_field
return Storage({"table_user": table_user,
"userfield": userfield,
"passfield": passfield})
def login_bare(self, username, password):
"""
        Logs in the user specified by username (or email) and password
"""
settings = self._get_login_settings()
user = settings.table_user(**{settings.userfield: \
username})
if user and user.get(settings.passfield, False):
password = settings.table_user[
settings.passfield].validate(password)[0]
if ((user.registration_key is None or
not user.registration_key.strip()) and
password == user[settings.passfield]):
self.login_user(user)
return user
else:
# user not in database try other login methods
for login_method in self.settings.login_methods:
if login_method != self and login_method(username, password):
self.user = username
return username
return False
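    # Illustrative sketch (not part of the module): logging a user in from a
    # script or a test, bypassing the login form; credentials are placeholders:
    #
    #     user = auth.login_bare('user@example.com', 'secret')
    #     if user:
    #         print 'logged in as', auth.user_id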
def register_bare(self, **fields):
"""
Registers a user as specified by username (or email)
and a raw password.
"""
settings = self._get_login_settings()
if not fields.get(settings.passfield):
raise ValueError("register_bare: " +
"password not provided or invalid")
elif not fields.get(settings.userfield):
raise ValueError("register_bare: " +
"userfield not provided or invalid")
fields[settings.passfield] = settings.table_user[settings.passfield].validate(fields[settings.passfield])[0]
user = self.get_or_create_user(fields, login=False, get=False, update_fields=self.settings.update_fields)
if not user:
# get or create did not create a user (it ignores duplicate records)
return False
return user
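    # Illustrative sketch (not part of the module): creating a user
    # programmatically; values are placeholders. The raw password is validated
    # and hashed by the table's CRYPT validator inside register_bare:
    #
    #     user = auth.register_bare(email='new@example.com',
    #                               password='a-strong-password')
    #     if not user:
    #         print 'user not created (duplicate or invalid)'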
def cas_login(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
version=2,
):
request = current.request
response = current.response
session = current.session
db, table = self.db, self.table_cas()
session._cas_service = request.vars.service or session._cas_service
if not request.env.http_host in self.settings.cas_domains or \
not session._cas_service:
raise HTTP(403, 'not authorized')
def allow_access(interactivelogin=False):
row = table(service=session._cas_service, user_id=self.user.id)
if row:
ticket = row.ticket
else:
ticket = 'ST-' + web2py_uuid()
table.insert(service=session._cas_service,
user_id=self.user.id,
ticket=ticket,
created_on=request.now,
renew=interactivelogin)
service = session._cas_service
query_sep = '&' if '?' in service else '?'
del session._cas_service
if 'warn' in request.vars and not interactivelogin:
response.headers[
'refresh'] = "5;URL=%s" % service + query_sep + "ticket=" + ticket
return A("Continue to %s" % service,
_href=service + query_sep + "ticket=" + ticket)
else:
redirect(service + query_sep + "ticket=" + ticket)
if self.is_logged_in() and not 'renew' in request.vars:
return allow_access()
elif not self.is_logged_in() and 'gateway' in request.vars:
redirect(service)
def cas_onaccept(form, onaccept=onaccept):
if not onaccept is DEFAULT:
onaccept(form)
return allow_access(interactivelogin=True)
return self.login(next, onvalidation, cas_onaccept, log)
def cas_validate(self, version=2, proxy=False):
request = current.request
db, table = self.db, self.table_cas()
current.response.headers['Content-Type'] = 'text'
ticket = request.vars.ticket
renew = 'renew' in request.vars
row = table(ticket=ticket)
success = False
if row:
userfield = self.settings.login_userfield or 'username' \
if 'username' in table.fields else 'email'
# If ticket is a service Ticket and RENEW flag respected
if ticket[0:3] == 'ST-' and \
not ((row.renew and renew) ^ renew):
user = self.table_user()(row.user_id)
row.delete_record()
success = True
def build_response(body):
return '<?xml version="1.0" encoding="UTF-8"?>\n' +\
TAG['cas:serviceResponse'](
body, **{'_xmlns:cas': 'http://www.yale.edu/tp/cas'}).xml()
if success:
if version == 1:
message = 'yes\n%s' % user[userfield]
else: # assume version 2
username = user.get('username', user[userfield])
message = build_response(
TAG['cas:authenticationSuccess'](
TAG['cas:user'](username),
*[TAG['cas:' + field.name](user[field.name])
for field in self.table_user()
if field.readable]))
else:
if version == 1:
message = 'no\n'
elif row:
message = build_response(TAG['cas:authenticationFailure']())
else:
message = build_response(
TAG['cas:authenticationFailure'](
'Ticket %s not recognized' % ticket,
_code='INVALID TICKET'))
raise HTTP(200, message)
def _reset_two_factor_auth(self, session):
"""When two-step authentication is enabled, this function is used to
        clear the session after successfully completing the second challenge
        or when the maximum number of allowed tries has been exhausted.
"""
session.auth_two_factor_user = None
session.auth_two_factor = None
session.auth_two_factor_enabled = False
# Allow up to 4 attempts (the 1st one plus 3 more)
session.auth_two_factor_tries_left = 3
def login(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a login form
"""
table_user = self.table_user()
settings = self.settings
if 'username' in table_user.fields or \
not settings.login_email_validate:
tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty)
if not settings.username_case_sensitive:
tmpvalidator = [IS_LOWER(), tmpvalidator]
else:
tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email)
if not settings.email_case_sensitive:
tmpvalidator = [IS_LOWER(), tmpvalidator]
request = current.request
response = current.response
session = current.session
passfield = settings.password_field
try:
table_user[passfield].requires[-1].min_length = 0
except:
pass
### use session for federated login
snext = self.get_vars_next()
if snext and self.settings.prevent_open_redirect_attacks:
items = snext.split('/')
if '//' in snext and items[2] != request.env.http_host:
snext = None
if snext:
session._auth_next = snext
elif session._auth_next:
snext = session._auth_next
### pass
if next is DEFAULT:
# important for security
next = settings.login_next
if callable(next):
next = next()
user_next = snext
if user_next:
external = user_next.split('://')
if external[0].lower() in ['http', 'https', 'ftp']:
host_next = user_next.split('//', 1)[-1].split('/')[0]
if host_next in settings.cas_domains:
next = user_next
else:
next = user_next
if onvalidation is DEFAULT:
onvalidation = settings.login_onvalidation
if onaccept is DEFAULT:
onaccept = settings.login_onaccept
if log is DEFAULT:
log = self.messages['login_log']
onfail = settings.login_onfail
user = None # default
        # setup the default field used for the form
multi_login = False
if self.settings.login_userfield:
username = self.settings.login_userfield
else:
if 'username' in table_user.fields:
username = 'username'
else:
username = 'email'
if self.settings.multi_login:
multi_login = True
old_requires = table_user[username].requires
table_user[username].requires = tmpvalidator
# If two-factor authentication is enabled, and the maximum
# number of tries allowed is used up, reset the session to
# pre-login state with two-factor auth
if session.auth_two_factor_enabled and session.auth_two_factor_tries_left < 1:
# Exceeded maximum allowed tries for this code. Require user to enter
# username and password again.
user = None
accepted_form = False
self._reset_two_factor_auth(session)
# Redirect to the default 'next' page without logging
# in. If that page requires login, user will be redirected
# back to the main login form
redirect(next, client_side=settings.client_side)
# Before showing the default login form, check whether
# we are already on the second step of two-step authentication.
# If we are, then skip this login form and use the form for the
# second challenge instead.
# Note to devs: The code inside the if-block is unchanged from the
# previous version of this file, other than for indentation inside
# to put it inside the if-block
if session.auth_two_factor_user is None:
if settings.remember_me_form:
extra_fields = [
Field('remember_me', 'boolean', default=False,
label = self.messages.label_remember_me)]
else:
extra_fields = []
# do we use our own login form, or from a central source?
if settings.login_form == self:
form = SQLFORM(
table_user,
fields=[username, passfield],
hidden=dict(_next=next),
showid=settings.showid,
submit_button=self.messages.login_button,
delete_label=self.messages.delete_label,
formstyle=settings.formstyle,
separator=settings.label_separator,
extra_fields = extra_fields,
)
captcha = settings.login_captcha or \
(settings.login_captcha != False and settings.captcha)
if captcha:
addrow(form, captcha.label, captcha, captcha.comment,
settings.formstyle, 'captcha__row')
accepted_form = False
if form.accepts(request, session if self.csrf_prevention else None,
formname='login', dbio=False,
onvalidation=onvalidation,
hideerror=settings.hideerror):
accepted_form = True
# check for username in db
entered_username = form.vars[username]
if multi_login and '@' in entered_username:
# if '@' in username check for email, not username
user = table_user(email = entered_username)
else:
user = table_user(**{username: entered_username})
if user:
# user in db, check if registration pending or disabled
temp_user = user
if temp_user.registration_key == 'pending':
response.flash = self.messages.registration_pending
return form
elif temp_user.registration_key in ('disabled', 'blocked'):
response.flash = self.messages.login_disabled
return form
elif (not temp_user.registration_key is None
and temp_user.registration_key.strip()):
response.flash = \
self.messages.registration_verifying
return form
# try alternate logins 1st as these have the
# current version of the password
user = None
for login_method in settings.login_methods:
if login_method != self and \
login_method(request.vars[username],
request.vars[passfield]):
if not self in settings.login_methods:
# do not store password in db
form.vars[passfield] = None
user = self.get_or_create_user(
form.vars, settings.update_fields)
break
if not user:
# alternates have failed, maybe because service inaccessible
if settings.login_methods[0] == self:
# try logging in locally using cached credentials
if form.vars.get(passfield, '') == temp_user[passfield]:
# success
user = temp_user
else:
# user not in db
if not settings.alternate_requires_registration:
# we're allowed to auto-register users from external systems
for login_method in settings.login_methods:
if login_method != self and \
login_method(request.vars[username],
request.vars[passfield]):
if not self in settings.login_methods:
form.vars[passfield] = None
user = self.get_or_create_user(
form.vars, settings.update_fields)
break
if not user:
self.log_event(self.messages['login_failed_log'],
request.post_vars)
session.flash = self.messages.invalid_login
callback(onfail, None)
redirect(
self.url(args=request.args, vars=request.get_vars),
client_side=settings.client_side)
else:
cas = settings.login_form
cas_user = cas.get_user()
if cas_user:
cas_user[passfield] = None
user = self.get_or_create_user(
table_user._filter_fields(cas_user),
settings.update_fields)
elif hasattr(cas, 'login_form'):
return cas.login_form()
else:
next = self.url(settings.function, args='login')
redirect(cas.login_url(next),
client_side=settings.client_side)
        # Extra login logic for two-factor authentication.
        # If 'user' has a value at this point, the first authentication
        # step succeeded (correct username and password). Two-factor can
        # be enabled for the whole app or per group membership.
        if user and self.settings.auth_two_factor_enabled == True:
            session.auth_two_factor_enabled = True
        elif user and self.settings.two_factor_authentication_group:
            role = self.settings.two_factor_authentication_group
            session.auth_two_factor_enabled = self.has_membership(
                user_id=user.id, role=role)
        # Challenge the user for the second factor.
        if session.auth_two_factor_enabled:
            form = SQLFORM.factory(
                Field('authentication_code', required=True,
                      comment='This code was emailed to you and is required for login.'),
                hidden=dict(_next=next),
                formstyle=settings.formstyle,
                separator=settings.label_separator)
            # accepted_form stays False until the challenge is answered;
            # it is checked further down before running onaccept callbacks.
            accepted_form = False
            # The login/password form has just been validated but the code
            # has not been generated or sent yet: do it now and remember
            # the user on the session until the challenge is answered.
            if session.auth_two_factor_user is None and user is not None:
                session.auth_two_factor_user = user  # store the validated user on the session
                session.auth_two_factor = random.randint(100000, 999999)
                session.auth_two_factor_tries_left = \
                    self.settings.auth_two_factor_tries_left
                self.settings.mailer.send(
                    to=user.email,
                    subject="Two-step Login Authentication Code",
                    message="Your temporary login code is {0}".format(session.auth_two_factor))
if form.accepts(request, session if self.csrf_prevention else None,
formname='login', dbio=False,
onvalidation=onvalidation,
hideerror=settings.hideerror):
accepted_form = True
if form.vars['authentication_code'] == str(session.auth_two_factor):
if user is None or user == session.auth_two_factor_user:
user = session.auth_two_factor_user
elif user != session.auth_two_factor_user:
user = None
self._reset_two_factor_auth(session)
                else:
                    # decrement first so the message reports the number of
                    # attempts actually remaining
                    session.auth_two_factor_tries_left -= 1
                    response.flash = \
                        'Incorrect code. {0} more attempt(s) remaining.'.format(
                            session.auth_two_factor_tries_left)
                    return form
            else:
                return form
if user:
user = Row(table_user._filter_fields(user, id=True))
self.login_user(user)
session.auth.expiration = \
request.post_vars.remember_me and \
settings.long_expiration or \
settings.expiration
session.auth.remember_me = 'remember_me' in request.post_vars
self.log_event(log, user)
session.flash = self.messages.logged_in
if settings.login_form == self:
if accepted_form:
callback(onaccept, form)
if next == session._auth_next:
session._auth_next = None
next = replace_id(next, form)
redirect(next, client_side=settings.client_side)
table_user[username].requires = old_requires
return form
elif user:
callback(onaccept, None)
if next == session._auth_next:
del session._auth_next
redirect(next, client_side=settings.client_side)
def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT):
"""
        Logs out the current user and redirects (by default to settings.logout_next)
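
        Example:
            Typical controller usage (a sketch, assuming the usual auth
            instance defined in a model)::

                def logout():
                    return auth.logout()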
"""
self._reset_two_factor_auth(current.session)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.logout_next
if onlogout is DEFAULT:
onlogout = self.settings.logout_onlogout
if onlogout:
onlogout(self.user)
if log is DEFAULT:
log = self.messages['logout_log']
if self.user:
self.log_event(log, self.user)
if self.settings.login_form != self:
cas = self.settings.login_form
cas_user = cas.get_user()
if cas_user:
next = cas.logout_url(next)
current.session.auth = None
if self.settings.renew_session_onlogout:
current.session.renew(clear_session=not self.settings.keep_session_onlogout)
current.session.flash = self.messages.logged_out
if not next is None:
redirect(next)
def register(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a registration form
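
        Example:
            Typical controller usage (a sketch, assuming the scaffolding
            auth instance defined in a model)::

                def register():
                    return dict(form=auth.register())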
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
if self.is_logged_in():
redirect(self.settings.logged_url,
client_side=self.settings.client_side)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.register_next
if onvalidation is DEFAULT:
onvalidation = self.settings.register_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.register_onaccept
if log is DEFAULT:
log = self.messages['register_log']
table_user = self.table_user()
if self.settings.login_userfield:
username = self.settings.login_userfield
elif 'username' in table_user.fields:
username = 'username'
else:
username = 'email'
unique_validator = IS_NOT_IN_DB(self.db, table_user[username])
if not table_user[username].requires:
table_user[username].requires = unique_validator
elif isinstance(table_user[username].requires, (list, tuple)):
if not any([isinstance(validator, IS_NOT_IN_DB) for validator in
table_user[username].requires]):
if isinstance(table_user[username].requires, list):
table_user[username].requires.append(unique_validator)
else:
table_user[username].requires += (unique_validator, )
elif not isinstance(table_user[username].requires, IS_NOT_IN_DB):
table_user[username].requires = [table_user[username].requires,
unique_validator]
passfield = self.settings.password_field
formstyle = self.settings.formstyle
if self.settings.register_verify_password:
extra_fields = [
Field("password_two", "password", requires=IS_EQUAL_TO(
request.post_vars.get(passfield, None),
error_message=self.messages.mismatched_password),
label=current.T("Confirm Password"))]
else:
extra_fields = []
form = SQLFORM(table_user,
fields=self.settings.register_fields,
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.register_button,
delete_label=self.messages.delete_label,
formstyle=formstyle,
separator=self.settings.label_separator,
extra_fields = extra_fields
)
captcha = self.settings.register_captcha or self.settings.captcha
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
if self.settings.pre_registration_div:
addrow(form, '',
DIV(_id="pre-reg", *self.settings.pre_registration_div),
'', formstyle, '')
table_user.registration_key.default = key = web2py_uuid()
if form.accepts(request, session if self.csrf_prevention else None,
formname='register',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
description = self.messages.group_description % form.vars
if self.settings.create_user_groups:
group_id = self.add_group(
self.settings.create_user_groups % form.vars, description)
self.add_membership(group_id, form.vars.id)
if self.settings.everybody_group_id:
self.add_membership(
self.settings.everybody_group_id, form.vars.id)
if self.settings.registration_requires_verification:
link = self.url(
self.settings.function, args=('verify_email', key), scheme=True)
d = dict(form.vars)
d.update(dict(key=key, link=link, username=form.vars[username]))
if not (self.settings.mailer and self.settings.mailer.send(
to=form.vars.email,
subject=self.messages.verify_email_subject,
message=self.messages.verify_email % d)):
self.db.rollback()
response.flash = self.messages.unable_send_email
return form
session.flash = self.messages.email_sent
if self.settings.registration_requires_approval and \
not self.settings.registration_requires_verification:
table_user[form.vars.id] = dict(registration_key='pending')
session.flash = self.messages.registration_pending
elif (not self.settings.registration_requires_verification or
self.settings.login_after_registration):
if not self.settings.registration_requires_verification:
table_user[form.vars.id] = dict(registration_key='')
session.flash = self.messages.registration_successful
user = table_user(**{username: form.vars[username]})
self.login_user(user)
session.flash = self.messages.logged_in
self.log_event(log, form.vars)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def is_logged_in(self):
"""
Checks if the user is logged in and returns True/False.
If so user is in auth.user as well as in session.auth.user
"""
if self.user:
return True
return False
def verify_email(self,
next=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Action used to verify the registration email
"""
key = getarg(-1)
table_user = self.table_user()
user = table_user(registration_key=key)
if not user:
redirect(self.settings.login_url)
if self.settings.registration_requires_approval:
user.update_record(registration_key='pending')
current.session.flash = self.messages.registration_pending
else:
user.update_record(registration_key='')
current.session.flash = self.messages.email_verified
if current.session.auth and current.session.auth.user:
current.session.auth.user.registration_key = user.registration_key
if log is DEFAULT:
log = self.messages['verify_email_log']
if next is DEFAULT:
next = self.settings.verify_email_next
if onaccept is DEFAULT:
onaccept = self.settings.verify_email_onaccept
self.log_event(log, user)
callback(onaccept, user)
redirect(next)
def retrieve_username(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to retrieve the user username
(only if there is a username field)
"""
table_user = self.table_user()
if not 'username' in table_user.fields:
raise HTTP(404)
request = current.request
response = current.response
session = current.session
captcha = self.settings.retrieve_username_captcha or \
(self.settings.retrieve_username_captcha != False and self.settings.captcha)
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if next is DEFAULT:
next = self.get_vars_next() or self.settings.retrieve_username_next
if onvalidation is DEFAULT:
onvalidation = self.settings.retrieve_username_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.retrieve_username_onaccept
if log is DEFAULT:
log = self.messages['retrieve_username_log']
old_requires = table_user.email.requires
table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
form = SQLFORM(table_user,
fields=['email'],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
if form.accepts(request, session if self.csrf_prevention else None,
formname='retrieve_username', dbio=False,
onvalidation=onvalidation, hideerror=self.settings.hideerror):
users = table_user._db(table_user.email==form.vars.email).select()
if not users:
current.session.flash = \
self.messages.invalid_email
redirect(self.url(args=request.args))
username = ', '.join(u.username for u in users)
self.settings.mailer.send(to=form.vars.email,
subject=self.messages.retrieve_username_subject,
message=self.messages.retrieve_username % dict(username=username))
session.flash = self.messages.email_sent
for user in users:
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next)
table_user.email.requires = old_requires
return form
def random_password(self):
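        """
        Builds a 12-character random password with lowercase and uppercase
        letters, digits and the specials '!#$*', in shuffled order.
        """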
import string
import random
password = ''
specials = r'!#$*'
for i in range(0, 3):
password += random.choice(string.lowercase)
password += random.choice(string.uppercase)
password += random.choice(string.digits)
password += random.choice(specials)
return ''.join(random.sample(password, len(password)))
def reset_password_deprecated(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to reset the user password (deprecated)
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if next is DEFAULT:
next = self.get_vars_next() or self.settings.retrieve_password_next
if onvalidation is DEFAULT:
onvalidation = self.settings.retrieve_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.retrieve_password_onaccept
if log is DEFAULT:
log = self.messages['retrieve_password_log']
old_requires = table_user.email.requires
table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
form = SQLFORM(table_user,
fields=['email'],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session if self.csrf_prevention else None,
formname='retrieve_password', dbio=False,
onvalidation=onvalidation, hideerror=self.settings.hideerror):
user = table_user(email=form.vars.email)
if not user:
current.session.flash = \
self.messages.invalid_email
redirect(self.url(args=request.args))
elif user.registration_key in ('pending', 'disabled', 'blocked'):
current.session.flash = \
self.messages.registration_pending
redirect(self.url(args=request.args))
password = self.random_password()
passfield = self.settings.password_field
d = {
passfield: str(table_user[passfield].validate(password)[0]),
'registration_key': ''
}
user.update_record(**d)
if self.settings.mailer and \
self.settings.mailer.send(to=form.vars.email,
subject=self.messages.retrieve_password_subject,
message=self.messages.retrieve_password % dict(password=password)):
session.flash = self.messages.email_sent
else:
session.flash = self.messages.unable_to_send_email
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next)
table_user.email.requires = old_requires
return form
def reset_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to reset the user password
"""
table_user = self.table_user()
request = current.request
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.reset_password_next
if self.settings.prevent_password_reset_attacks:
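            # Store the key in the session and redirect to a clean URL, so
            # the one-time key cannot leak through Referer headers or logs.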
key = request.vars.key
if key:
session._reset_password_key = key
redirect(self.url(args='reset_password'))
else:
key = session._reset_password_key
else:
key = request.vars.key
try:
t0 = int(key.split('-')[0])
if time.time() - t0 > 60 * 60 * 24:
raise Exception
user = table_user(reset_password_key=key)
if not user:
raise Exception
except Exception:
session.flash = self.messages.invalid_reset_password
redirect(next, client_side=self.settings.client_side)
passfield = self.settings.password_field
form = SQLFORM.factory(
Field('new_password', 'password',
label=self.messages.new_password,
requires=self.table_user()[passfield].requires),
Field('new_password2', 'password',
label=self.messages.verify_password,
requires=[IS_EXPR(
'value==%s' % repr(request.vars.new_password),
self.messages.mismatched_password)]),
submit_button=self.messages.password_reset_button,
hidden=dict(_next=next),
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session,
hideerror=self.settings.hideerror):
user.update_record(
**{passfield: str(form.vars.new_password),
'registration_key': '',
'reset_password_key': ''})
session.flash = self.messages.password_changed
if self.settings.login_after_password_change:
self.login_user(user)
redirect(next, client_side=self.settings.client_side)
return form
def request_reset_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to reset the user password
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
captcha = self.settings.retrieve_password_captcha or \
(self.settings.retrieve_password_captcha != False and self.settings.captcha)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.request_reset_password_next
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if onvalidation is DEFAULT:
onvalidation = self.settings.reset_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.reset_password_onaccept
if log is DEFAULT:
log = self.messages['reset_password_log']
        userfield = self.settings.login_userfield or (
            'username' if 'username' in table_user.fields else 'email')
if userfield == 'email':
table_user.email.requires = [
IS_EMAIL(error_message=self.messages.invalid_email),
IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
if not self.settings.email_case_sensitive:
table_user.email.requires.insert(0, IS_LOWER())
else:
table_user.username.requires = [
IS_IN_DB(self.db, table_user.username,
error_message=self.messages.invalid_username)]
if not self.settings.username_case_sensitive:
table_user.username.requires.insert(0, IS_LOWER())
form = SQLFORM(table_user,
fields=[userfield],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.password_reset_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
if form.accepts(request, session if self.csrf_prevention else None,
formname='reset_password', dbio=False,
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
user = table_user(**{userfield:form.vars.get(userfield)})
if not user:
session.flash = self.messages['invalid_%s' % userfield]
redirect(self.url(args=request.args),
client_side=self.settings.client_side)
elif user.registration_key in ('pending', 'disabled', 'blocked'):
session.flash = self.messages.registration_pending
redirect(self.url(args=request.args),
client_side=self.settings.client_side)
if self.email_reset_password(user):
session.flash = self.messages.email_sent
else:
session.flash = self.messages.unable_to_send_email
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def email_reset_password(self, user):
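        """
        Emails the user a reset-password link containing a time-stamped
        one-time key, stores the key on the user record, and returns True
        on success (False when the email could not be sent).
        """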
reset_password_key = str(int(time.time())) + '-' + web2py_uuid()
link = self.url(self.settings.function,
args=('reset_password',), vars={'key': reset_password_key},
scheme=True)
d = dict(user)
d.update(dict(key=reset_password_key, link=link))
if self.settings.mailer and self.settings.mailer.send(
to=user.email,
subject=self.messages.reset_password_subject,
message=self.messages.reset_password % d):
user.update_record(reset_password_key=reset_password_key)
return True
return False
def retrieve_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
if self.settings.reset_password_requires_verification:
return self.request_reset_password(next, onvalidation, onaccept, log)
else:
return self.reset_password_deprecated(next, onvalidation, onaccept, log)
def change_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form that lets the user change password
"""
if not self.is_logged_in():
redirect(self.settings.login_url,
client_side=self.settings.client_side)
db = self.db
table_user = self.table_user()
s = db(table_user.id == self.user.id)
request = current.request
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.change_password_next
if onvalidation is DEFAULT:
onvalidation = self.settings.change_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.change_password_onaccept
if log is DEFAULT:
log = self.messages['change_password_log']
passfield = self.settings.password_field
requires = table_user[passfield].requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
requires = filter(lambda t: isinstance(t, CRYPT), requires)
if requires:
requires[0].min_length = 0
form = SQLFORM.factory(
Field('old_password', 'password', requires=requires,
label=self.messages.old_password),
Field('new_password', 'password',
label=self.messages.new_password,
requires=table_user[passfield].requires),
Field('new_password2', 'password',
label=self.messages.verify_password,
requires=[IS_EXPR(
'value==%s' % repr(request.vars.new_password),
self.messages.mismatched_password)]),
submit_button=self.messages.password_change_button,
hidden=dict(_next=next),
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session,
formname='change_password',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
current_user = s.select(limitby=(0, 1), orderby_on_limitby=False).first()
if not form.vars['old_password'] == current_user[passfield]:
form.errors['old_password'] = self.messages.invalid_password
else:
d = {passfield: str(form.vars.new_password)}
s.update(**d)
session.flash = self.messages.password_changed
self.log_event(log, self.user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def profile(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form that lets the user change his/her profile
"""
table_user = self.table_user()
if not self.is_logged_in():
redirect(self.settings.login_url,
client_side=self.settings.client_side)
passfield = self.settings.password_field
table_user[passfield].writable = False
request = current.request
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.profile_next
if onvalidation is DEFAULT:
onvalidation = self.settings.profile_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.profile_onaccept
if log is DEFAULT:
log = self.messages['profile_log']
form = SQLFORM(
table_user,
self.user.id,
fields=self.settings.profile_fields,
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.profile_save_button,
delete_label=self.messages.delete_label,
upload=self.settings.download_url,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator,
deletable=self.settings.allow_delete_accounts,
)
if form.accepts(request, session,
formname='profile',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
self.user.update(table_user._filter_fields(form.vars))
session.flash = self.messages.profile_updated
self.log_event(log, self.user)
callback(onaccept, form)
if form.deleted:
return self.logout()
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
def run_login_onaccept(self):
onaccept = self.settings.login_onaccept
if onaccept:
form = Storage(dict(vars=self.user))
if not isinstance(onaccept, (list, tuple)):
onaccept = [onaccept]
for callback in onaccept:
callback(form)
def is_impersonating(self):
return self.is_logged_in() and 'impersonator' in current.session.auth
def impersonate(self, user_id=DEFAULT):
"""
        To use this, make a POST to
        `http://..../impersonate` with `request.post_vars.user_id=<id>`
Set request.post_vars.user_id to 0 to restore original user.
requires impersonator is logged in and::
has_permission('impersonate', 'auth_user', user_id)
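
        Example:
            A sketch from a custom admin action (user id 5 is just a
            placeholder)::

                auth.impersonate(5)   # become user 5
                auth.impersonate(0)   # restore the original identity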
"""
request = current.request
session = current.session
auth = session.auth
table_user = self.table_user()
if not self.is_logged_in():
raise HTTP(401, "Not Authorized")
current_id = auth.user.id
requested_id = user_id
if user_id is DEFAULT:
user_id = current.request.post_vars.user_id
if user_id and user_id != self.user.id and user_id != '0':
if not self.has_permission('impersonate',
self.table_user(),
user_id):
raise HTTP(403, "Forbidden")
user = table_user(user_id)
if not user:
raise HTTP(401, "Not Authorized")
auth.impersonator = pickle.dumps(session, pickle.HIGHEST_PROTOCOL)
auth.user.update(
table_user._filter_fields(user, True))
self.user = auth.user
self.update_groups()
log = self.messages['impersonate_log']
self.log_event(log, dict(id=current_id, other_id=auth.user.id))
self.run_login_onaccept()
elif user_id in (0, '0'):
if self.is_impersonating():
session.clear()
session.update(pickle.loads(auth.impersonator))
self.user = session.auth.user
self.update_groups()
self.run_login_onaccept()
return None
if requested_id is DEFAULT and not request.post_vars:
return SQLFORM.factory(Field('user_id', 'integer'))
return SQLFORM(table_user, user.id, readonly=True)
def update_groups(self):
if not self.user:
return
user_groups = self.user_groups = {}
if current.session.auth:
current.session.auth.user_groups = self.user_groups
table_group = self.table_group()
table_membership = self.table_membership()
memberships = self.db(
table_membership.user_id == self.user.id).select()
for membership in memberships:
group = table_group(membership.group_id)
if group:
user_groups[membership.group_id] = group.role
def groups(self):
"""
Displays the groups and their roles for the logged in user
"""
if not self.is_logged_in():
redirect(self.settings.login_url)
table_membership = self.table_membership()
memberships = self.db(
table_membership.user_id == self.user.id).select()
table = TABLE()
for membership in memberships:
table_group = self.table_group()
groups = self.db(table_group.id == membership.group_id).select()
if groups:
group = groups[0]
table.append(TR(H3(group.role, '(%s)' % group.id)))
table.append(TR(P(group.description)))
if not memberships:
return None
return table
def not_authorized(self):
"""
You can change the view for this page to make it look as you like
"""
if current.request.ajax:
raise HTTP(403, 'ACCESS DENIED')
return self.messages.access_denied
def requires(self, condition, requires_login=True, otherwise=None):
"""
Decorator that prevents access to action if not logged in
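
        Example:
            The condition can be a boolean or a callable (a sketch)::

                @auth.requires(lambda: auth.user_id == 1, otherwise=URL('index'))
                def admin_only():
                    return dict()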
"""
def decorator(action):
def f(*a, **b):
basic_allowed, basic_accepted, user = self.basic()
user = user or self.user
if requires_login:
if not user:
if current.request.ajax:
raise HTTP(401, self.messages.ajax_failed_authentication)
elif not otherwise is None:
if callable(otherwise):
return otherwise()
redirect(otherwise)
elif self.settings.allow_basic_login_only or \
basic_accepted or current.request.is_restful:
raise HTTP(403, "Not authorized")
else:
next = self.here()
current.session.flash = current.response.flash
return call_or_redirect(
self.settings.on_failed_authentication,
self.settings.login_url +
'?_next=' + urllib.quote(next))
if callable(condition):
flag = condition()
else:
flag = condition
if not flag:
current.session.flash = self.messages.access_denied
return call_or_redirect(
self.settings.on_failed_authorization)
return action(*a, **b)
f.__doc__ = action.__doc__
f.__name__ = action.__name__
f.__dict__.update(action.__dict__)
return f
return decorator
def requires_login(self, otherwise=None):
"""
Decorator that prevents access to action if not logged in
"""
return self.requires(True, otherwise=otherwise)
def requires_membership(self, role=None, group_id=None, otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of group_id.
If role is provided instead of group_id then the
group_id is calculated.
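
        Example:
            Use as (a sketch; the role must exist in the group table)::

                @auth.requires_membership('managers')
                def approve():
                    return dict()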
"""
def has_membership(self=self, group_id=group_id, role=role):
return self.has_membership(group_id=group_id, role=role)
return self.requires(has_membership, otherwise=otherwise)
def requires_permission(self, name, table_name='', record_id=0,
otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of any group (role) that
has 'name' access to 'table_name', 'record_id'.
"""
def has_permission(self=self, name=name, table_name=table_name, record_id=record_id):
return self.has_permission(name, table_name, record_id)
return self.requires(has_permission, otherwise=otherwise)
def requires_signature(self, otherwise=None, hash_vars=True):
"""
        Decorator that prevents access to action unless the request URL
        carries a valid digital signature, i.e. one generated with
        URL(..., user_signature=True). hash_vars controls whether the
        query variables are included in the verification.
"""
def verify():
return URL.verify(current.request, user_signature=True, hash_vars=hash_vars)
return self.requires(verify, otherwise)
def add_group(self, role, description=''):
"""
Creates a group associated to a role
"""
group_id = self.table_group().insert(
role=role, description=description)
self.log_event(self.messages['add_group_log'],
dict(group_id=group_id, role=role))
return group_id
def del_group(self, group_id):
"""
Deletes a group
"""
self.db(self.table_group().id == group_id).delete()
self.db(self.table_membership().group_id == group_id).delete()
self.db(self.table_permission().group_id == group_id).delete()
if group_id in self.user_groups: del self.user_groups[group_id]
self.log_event(self.messages.del_group_log, dict(group_id=group_id))
def id_group(self, role):
"""
Returns the group_id of the group specified by the role
"""
rows = self.db(self.table_group().role == role).select()
if not rows:
return None
return rows[0].id
def user_group(self, user_id=None):
"""
Returns the group_id of the group uniquely associated to this user
i.e. `role=user:[user_id]`
"""
return self.id_group(self.user_group_role(user_id))
def user_group_role(self, user_id=None):
if not self.settings.create_user_groups:
return None
if user_id:
user = self.table_user()[user_id]
else:
user = self.user
return self.settings.create_user_groups % user
def has_membership(self, group_id=None, user_id=None, role=None):
"""
Checks if user is member of group_id or role
"""
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id)
if not user_id and self.user:
user_id = self.user.id
membership = self.table_membership()
if group_id and user_id and self.db((membership.user_id == user_id)
& (membership.group_id == group_id)).select():
r = True
else:
r = False
self.log_event(self.messages['has_membership_log'],
dict(user_id=user_id, group_id=group_id, check=r))
return r
def add_membership(self, group_id=None, user_id=None, role=None):
"""
Gives user_id membership of group_id or role
        if user_id is None then user_id is that of the currently logged in user
"""
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id)
if not user_id and self.user:
user_id = self.user.id
membership = self.table_membership()
record = membership(user_id=user_id, group_id=group_id)
if record:
return record.id
else:
id = membership.insert(group_id=group_id, user_id=user_id)
if role:
self.user_groups[group_id] = role
else:
self.update_groups()
self.log_event(self.messages['add_membership_log'],
dict(user_id=user_id, group_id=group_id))
return id
def del_membership(self, group_id=None, user_id=None, role=None):
"""
Revokes membership from group_id to user_id
        if user_id is None then user_id is that of the currently logged in user
"""
group_id = group_id or self.id_group(role)
if not user_id and self.user:
user_id = self.user.id
membership = self.table_membership()
self.log_event(self.messages['del_membership_log'],
dict(user_id=user_id, group_id=group_id))
ret = self.db(membership.user_id
== user_id)(membership.group_id
== group_id).delete()
if group_id in self.user_groups: del self.user_groups[group_id]
return ret
def has_permission(self,
name='any',
table_name='',
record_id=0,
user_id=None,
group_id=None,
):
"""
Checks if user_id or current logged in user is member of a group
that has 'name' permission on 'table_name' and 'record_id'
if group_id is passed, it checks whether the group has the permission
"""
if not group_id and self.settings.everybody_group_id and \
self.has_permission(
name, table_name, record_id, user_id=None,
group_id=self.settings.everybody_group_id):
return True
if not user_id and not group_id and self.user:
user_id = self.user.id
if user_id:
membership = self.table_membership()
rows = self.db(membership.user_id
== user_id).select(membership.group_id)
groups = set([row.group_id for row in rows])
if group_id and not group_id in groups:
return False
else:
groups = set([group_id])
permission = self.table_permission()
rows = self.db(permission.name == name)(permission.table_name
== str(table_name))(permission.record_id
== record_id).select(permission.group_id)
groups_required = set([row.group_id for row in rows])
if record_id:
rows = self.db(permission.name
== name)(permission.table_name
== str(table_name))(permission.record_id
== 0).select(permission.group_id)
groups_required = groups_required.union(set([row.group_id
for row in rows]))
if groups.intersection(groups_required):
r = True
else:
r = False
if user_id:
self.log_event(self.messages['has_permission_log'],
dict(user_id=user_id, name=name,
table_name=table_name, record_id=record_id))
return r
def add_permission(self,
group_id,
name='any',
table_name='',
record_id=0,
):
"""
Gives group_id 'name' access to 'table_name' and 'record_id'
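
        Example:
            Granting and checking access together (a sketch)::

                group_id = auth.add_group('editors')
                auth.add_membership(group_id)  # current logged in user
                auth.add_permission(group_id, 'update', db.mytable)
                assert auth.has_permission('update', db.mytable)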
"""
permission = self.table_permission()
if group_id == 0:
group_id = self.user_group()
record = self.db(permission.group_id == group_id)(permission.name == name)(permission.table_name == str(table_name))(
permission.record_id == long(record_id)).select(limitby=(0, 1), orderby_on_limitby=False).first()
if record:
id = record.id
else:
id = permission.insert(group_id=group_id, name=name,
table_name=str(table_name),
record_id=long(record_id))
self.log_event(self.messages['add_permission_log'],
dict(permission_id=id, group_id=group_id,
name=name, table_name=table_name,
record_id=record_id))
return id
def del_permission(self,
group_id,
name='any',
table_name='',
record_id=0,
):
"""
Revokes group_id 'name' access to 'table_name' and 'record_id'
"""
permission = self.table_permission()
self.log_event(self.messages['del_permission_log'],
dict(group_id=group_id, name=name,
table_name=table_name, record_id=record_id))
return self.db(permission.group_id == group_id)(permission.name
== name)(permission.table_name
== str(table_name))(permission.record_id
== long(record_id)).delete()
def accessible_query(self, name, table, user_id=None):
"""
Returns a query with all accessible records for user_id or
the current logged in user
        this method does not work on GAE because it uses JOIN and IN
Example:
Use as::
db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL)
"""
if not user_id:
user_id = self.user_id
db = self.db
if isinstance(table, str) and table in self.db.tables():
table = self.db[table]
elif isinstance(table, (Set, Query)):
if isinstance(table, Set):
cquery = table.query
else:
cquery = table
tablenames = db._adapter.tables(cquery)
for tablename in tablenames:
cquery &= self.accessible_query(name, tablename,
user_id=user_id)
return cquery
if not isinstance(table, str) and\
self.has_permission(name, table, 0, user_id):
return table.id > 0
membership = self.table_membership()
permission = self.table_permission()
query = table.id.belongs(
db(membership.user_id == user_id)
(membership.group_id == permission.group_id)
(permission.name == name)
(permission.table_name == table)
._select(permission.record_id))
if self.settings.everybody_group_id:
query |= table.id.belongs(
db(permission.group_id == self.settings.everybody_group_id)
(permission.name == name)
(permission.table_name == table)
._select(permission.record_id))
return query
@staticmethod
def archive(form,
archive_table=None,
current_record='current_record',
archive_current=False,
fields=None):
"""
If you have a table (db.mytable) that needs full revision history you
can just do::
form=crud.update(db.mytable,myrecord,onaccept=auth.archive)
or::
form=SQLFORM(db.mytable,myrecord).process(onaccept=auth.archive)
crud.archive will define a new table "mytable_archive" and store
a copy of the current record (if archive_current=True)
or a copy of the previous record (if archive_current=False)
in the newly created table including a reference
to the current record.
        fields allows you to specify extra fields that need to be archived.
If you want to access such table you need to define it yourself
in a model::
db.define_table('mytable_archive',
Field('current_record',db.mytable),
db.mytable)
Notice such table includes all fields of db.mytable plus one: current_record.
crud.archive does not timestamp the stored record unless your original table
        has fields like::
db.define_table(...,
Field('saved_on','datetime',
default=request.now,update=request.now,writable=False),
Field('saved_by',auth.user,
default=auth.user_id,update=auth.user_id,writable=False),
there is nothing special about these fields since they are filled before
the record is archived.
If you want to change the archive table name and the name of the reference field
you can do, for example::
db.define_table('myhistory',
Field('parent_record',db.mytable),
db.mytable)
and use it as::
form=crud.update(db.mytable,myrecord,
onaccept=lambda form:crud.archive(form,
archive_table=db.myhistory,
current_record='parent_record'))
"""
if not archive_current and not form.record:
return None
table = form.table
if not archive_table:
archive_table_name = '%s_archive' % table
if not archive_table_name in table._db:
table._db.define_table(
archive_table_name,
Field(current_record, table),
*[field.clone(unique=False) for field in table])
archive_table = table._db[archive_table_name]
new_record = {current_record: form.vars.id}
for fieldname in archive_table.fields:
if not fieldname in ['id', current_record]:
if archive_current and fieldname in form.vars:
new_record[fieldname] = form.vars[fieldname]
elif form.record and fieldname in form.record:
new_record[fieldname] = form.record[fieldname]
if fields:
new_record.update(fields)
id = archive_table.insert(**new_record)
return id
def wiki(self,
slug=None,
env=None,
render='markmin',
manage_permissions=False,
force_prefix='',
restrict_search=False,
resolve=True,
extra=None,
menu_groups=None,
templates=None,
migrate=True,
controller=None,
function=None,
force_render=False,
groups=None):
if controller and function:
resolve = False
if not hasattr(self, '_wiki'):
self._wiki = Wiki(self, render=render,
manage_permissions=manage_permissions,
force_prefix=force_prefix,
restrict_search=restrict_search,
env=env, extra=extra or {},
menu_groups=menu_groups,
templates=templates,
migrate=migrate,
controller=controller,
function=function,
groups=groups)
else:
self._wiki.env.update(env or {})
wiki = None
if resolve:
if slug:
wiki = self._wiki.read(slug, force_render)
if isinstance(wiki, dict) and wiki.has_key('content'):
wiki = wiki['content']
else:
wiki = self._wiki()
if isinstance(wiki, basestring):
wiki = XML(wiki)
return wiki
def wikimenu(self):
"""To be used in menu.py for app wide wiki menus"""
if (hasattr(self, "_wiki") and
self._wiki.settings.controller and
self._wiki.settings.function):
self._wiki.automenu()
class Crud(object):
def url(self, f=None, args=None, vars=None):
"""
This should point to the controller that exposes
download and crud
"""
if args is None:
args = []
if vars is None:
vars = {}
return URL(c=self.settings.controller, f=f, args=args, vars=vars)
def __init__(self, environment, db=None, controller='default'):
self.db = db
if not db and environment and isinstance(environment, DAL):
self.db = environment
elif not db:
raise SyntaxError("must pass db as first or second argument")
self.environment = current
settings = self.settings = Settings()
settings.auth = None
settings.logger = None
settings.create_next = None
settings.update_next = None
settings.controller = controller
settings.delete_next = self.url()
settings.download_url = self.url('download')
settings.create_onvalidation = StorageList()
settings.update_onvalidation = StorageList()
settings.delete_onvalidation = StorageList()
settings.create_onaccept = StorageList()
settings.update_onaccept = StorageList()
settings.update_ondelete = StorageList()
settings.delete_onaccept = StorageList()
settings.update_deletable = True
settings.showid = False
settings.keepvalues = False
settings.create_captcha = None
settings.update_captcha = None
settings.captcha = None
settings.formstyle = 'table3cols'
settings.label_separator = ': '
settings.hideerror = False
settings.detect_record_change = True
settings.hmac_key = None
settings.lock_keys = True
messages = self.messages = Messages(current.T)
messages.submit_button = 'Submit'
messages.delete_label = 'Check to delete'
messages.record_created = 'Record Created'
messages.record_updated = 'Record Updated'
messages.record_deleted = 'Record Deleted'
messages.update_log = 'Record %(id)s updated'
messages.create_log = 'Record %(id)s created'
messages.read_log = 'Record %(id)s read'
messages.delete_log = 'Record %(id)s deleted'
messages.lock_keys = True
def __call__(self):
args = current.request.args
if len(args) < 1:
raise HTTP(404)
elif args[0] == 'tables':
return self.tables()
elif len(args) > 1 and not args(1) in self.db.tables:
raise HTTP(404)
table = self.db[args(1)]
if args[0] == 'create':
return self.create(table)
elif args[0] == 'select':
return self.select(table, linkto=self.url(args='read'))
elif args[0] == 'search':
form, rows = self.search(table, linkto=self.url(args='read'))
return DIV(form, SQLTABLE(rows))
elif args[0] == 'read':
return self.read(table, args(2))
elif args[0] == 'update':
return self.update(table, args(2))
elif args[0] == 'delete':
return self.delete(table, args(2))
else:
raise HTTP(404)
def log_event(self, message, vars):
if self.settings.logger:
self.settings.logger.log_event(message, vars, origin='crud')
def has_permission(self, name, table, record=0):
if not self.settings.auth:
return True
try:
record_id = record.id
except:
record_id = record
return self.settings.auth.has_permission(name, str(table), record_id)
def tables(self):
return TABLE(*[TR(A(name,
_href=self.url(args=('select', name))))
for name in self.db.tables])
@staticmethod
def archive(form, archive_table=None, current_record='current_record'):
return Auth.archive(form, archive_table=archive_table,
current_record=current_record)
def update(self,
table,
record,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
ondelete=DEFAULT,
log=DEFAULT,
message=DEFAULT,
deletable=DEFAULT,
formname=DEFAULT,
**attributes
):
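        """
        Returns an update form for record in table. A sketch of typical
        usage, assuming crud = Crud(db) defined in a model::

            def edit():
                return dict(form=crud.update(db.mytable, request.args(0)))
        """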
if not (isinstance(table, Table) or table in self.db.tables) \
or (isinstance(record, str) and not str(record).isdigit()):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
try:
record_id = record.id
except:
record_id = record or 0
if record_id and not self.has_permission('update', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
if not record_id and not self.has_permission('create', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
request = current.request
response = current.response
session = current.session
if request.extension == 'json' and request.vars.json:
request.vars.update(json_parser.loads(request.vars.json))
if next is DEFAULT:
next = request.get_vars._next \
or request.post_vars._next \
or self.settings.update_next
if onvalidation is DEFAULT:
onvalidation = self.settings.update_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.update_onaccept
if ondelete is DEFAULT:
ondelete = self.settings.update_ondelete
if log is DEFAULT:
log = self.messages['update_log']
if deletable is DEFAULT:
deletable = self.settings.update_deletable
if message is DEFAULT:
message = self.messages.record_updated
if not 'hidden' in attributes:
attributes['hidden'] = {}
attributes['hidden']['_next'] = next
form = SQLFORM(
table,
record,
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
deletable=deletable,
upload=self.settings.download_url,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator,
**attributes # contains hidden
)
self.accepted = False
self.deleted = False
captcha = self.settings.update_captcha or self.settings.captcha
if record and captcha:
addrow(form, captcha.label, captcha, captcha.comment,
self.settings.formstyle, 'captcha__row')
captcha = self.settings.create_captcha or self.settings.captcha
if not record and captcha:
addrow(form, captcha.label, captcha, captcha.comment,
self.settings.formstyle, 'captcha__row')
if not request.extension in ('html', 'load'):
(_session, _formname) = (None, None)
else:
(_session, _formname) = (
session, '%s/%s' % (table._tablename, form.record_id))
if not formname is DEFAULT:
_formname = formname
keepvalues = self.settings.keepvalues
if request.vars.delete_this_record:
keepvalues = False
if isinstance(onvalidation, StorageList):
onvalidation = onvalidation.get(table._tablename, [])
if form.accepts(request, _session, formname=_formname,
onvalidation=onvalidation, keepvalues=keepvalues,
hideerror=self.settings.hideerror,
detect_record_change=self.settings.detect_record_change):
self.accepted = True
response.flash = message
if log:
self.log_event(log, form.vars)
if request.vars.delete_this_record:
self.deleted = True
message = self.messages.record_deleted
callback(ondelete, form, table._tablename)
response.flash = message
callback(onaccept, form, table._tablename)
if not request.extension in ('html', 'load'):
raise HTTP(200, 'RECORD CREATED/UPDATED')
if isinstance(next, (list, tuple)): # fix issue with 2.6
next = next[0]
if next: # Only redirect when explicit
next = replace_id(next, form)
session.flash = response.flash
redirect(next)
elif not request.extension in ('html', 'load'):
raise HTTP(401, serializers.json(dict(errors=form.errors)))
return form
def create(self,
table,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
message=DEFAULT,
formname=DEFAULT,
**attributes
):
if next is DEFAULT:
next = self.settings.create_next
if onvalidation is DEFAULT:
onvalidation = self.settings.create_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.create_onaccept
if log is DEFAULT:
log = self.messages['create_log']
if message is DEFAULT:
message = self.messages.record_created
return self.update(
table,
None,
next=next,
onvalidation=onvalidation,
onaccept=onaccept,
log=log,
message=message,
deletable=False,
formname=formname,
**attributes
)
def read(self, table, record):
if not (isinstance(table, Table) or table in self.db.tables) \
or (isinstance(record, str) and not str(record).isdigit()):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('read', table, record):
redirect(self.settings.auth.settings.on_failed_authorization)
form = SQLFORM(
table,
record,
readonly=True,
comments=False,
upload=self.settings.download_url,
showid=self.settings.showid,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if not current.request.extension in ('html', 'load'):
return table._filter_fields(form.record, id=True)
return form
def delete(self,
table,
record_id,
next=DEFAULT,
message=DEFAULT,
):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('delete', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
request = current.request
session = current.session
if next is DEFAULT:
next = request.get_vars._next \
or request.post_vars._next \
or self.settings.delete_next
if message is DEFAULT:
message = self.messages.record_deleted
record = table[record_id]
if record:
callback(self.settings.delete_onvalidation, record)
del table[record_id]
callback(self.settings.delete_onaccept, record, table._tablename)
session.flash = message
redirect(next)
def rows(
self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not self.has_permission('select', table):
redirect(self.settings.auth.settings.on_failed_authorization)
if not isinstance(table, Table):
table = self.db[table]
if not query:
query = table.id > 0
if not fields:
fields = [field for field in table if field.readable]
else:
fields = [table[f] if isinstance(f, str) else f for f in fields]
rows = self.db(query).select(*fields, **dict(orderby=orderby,
limitby=limitby))
return rows
def select(self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
headers=None,
**attr
):
headers = headers or {}
rows = self.rows(table, query, fields, orderby, limitby)
if not rows:
return None # Nicer than an empty table.
if not 'upload' in attr:
attr['upload'] = self.url('download')
if not current.request.extension in ('html', 'load'):
return rows.as_list()
if not headers:
if isinstance(table, str):
table = self.db[table]
headers = dict((str(k), k.label) for k in table)
return SQLTABLE(rows, headers=headers, **attr)
def get_format(self, field):
rtable = field._db[field.type[10:]]
format = rtable.get('_format', None)
if format and isinstance(format, str):
return format[2:-2]
return field.name
def get_query(self, field, op, value, refsearch=False):
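        # For reference fields (refsearch=True) this returns a row filter
        # to be applied in Python via rows.find(); otherwise it returns a
        # DAL query usable in db(...).select().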
try:
if refsearch:
format = self.get_format(field)
if op == 'equals':
if not refsearch:
return field == value
else:
return lambda row: row[field.name][format] == value
elif op == 'not equal':
if not refsearch:
return field != value
else:
return lambda row: row[field.name][format] != value
elif op == 'greater than':
if not refsearch:
return field > value
else:
return lambda row: row[field.name][format] > value
elif op == 'less than':
if not refsearch:
return field < value
else:
return lambda row: row[field.name][format] < value
elif op == 'starts with':
if not refsearch:
return field.like(value + '%')
else:
return lambda row: str(row[field.name][format]).startswith(value)
elif op == 'ends with':
if not refsearch:
return field.like('%' + value)
else:
return lambda row: str(row[field.name][format]).endswith(value)
elif op == 'contains':
if not refsearch:
return field.like('%' + value + '%')
else:
return lambda row: value in row[field.name][format]
except:
return None
def search(self, *tables, **args):
"""
Creates a search form and its results for a table
Examples:
Use as::
form, results = crud.search(db.test,
queries = ['equals', 'not equal', 'contains'],
query_labels={'equals':'Equals',
'not equal':'Not equal'},
fields = ['id','children'],
field_labels = {
'id':'ID','children':'Children'},
zero='Please choose',
query = (db.test.id > 0)&(db.test.id != 3) )
"""
table = tables[0]
fields = args.get('fields', table.fields)
validate = args.get('validate', True)
request = current.request
db = self.db
if not (isinstance(table, Table) or table in db.tables):
raise HTTP(404)
attributes = {}
for key in ('orderby', 'groupby', 'left', 'distinct', 'limitby', 'cache'):
if key in args:
attributes[key] = args[key]
tbl = TABLE()
selected = []
refsearch = []
results = []
showall = args.get('showall', False)
if showall:
selected = fields
chkall = args.get('chkall', False)
if chkall:
for f in fields:
request.vars['chk%s' % f] = 'on'
ops = args.get('queries', [])
zero = args.get('zero', '')
if not ops:
ops = ['equals', 'not equal', 'greater than',
'less than', 'starts with',
'ends with', 'contains']
ops.insert(0, zero)
query_labels = args.get('query_labels', {})
query = args.get('query', table.id > 0)
field_labels = args.get('field_labels', {})
for field in fields:
field = table[field]
if not field.readable:
continue
fieldname = field.name
chkval = request.vars.get('chk' + fieldname, None)
txtval = request.vars.get('txt' + fieldname, None)
opval = request.vars.get('op' + fieldname, None)
row = TR(TD(INPUT(_type="checkbox", _name="chk" + fieldname,
_disabled=(field.type == 'id'),
value=(field.type == 'id' or chkval == 'on'))),
TD(field_labels.get(fieldname, field.label)),
TD(SELECT([OPTION(query_labels.get(op, op),
_value=op) for op in ops],
_name="op" + fieldname,
value=opval)),
TD(INPUT(_type="text", _name="txt" + fieldname,
_value=txtval, _id='txt' + fieldname,
_class=str(field.type))))
tbl.append(row)
if request.post_vars and (chkval or field.type == 'id'):
if txtval and opval != '':
if field.type[0:10] == 'reference ':
refsearch.append(self.get_query(field, opval, txtval, refsearch=True))
elif validate:
value, error = field.validate(txtval)
if not error:
### TODO deal with 'starts with', 'ends with', 'contains' on GAE
query &= self.get_query(field, opval, value)
else:
row[3].append(DIV(error, _class='error'))
else:
query &= self.get_query(field, opval, txtval)
selected.append(field)
form = FORM(tbl, INPUT(_type="submit"))
if selected:
try:
results = db(query).select(*selected, **attributes)
for r in refsearch:
results = results.find(r)
except: # hmmm, we should do better here
results = None
return form, results
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
def fetch(url, data=None, headers=None,
cookie=Cookie.SimpleCookie(),
user_agent='Mozilla/5.0'):
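    """
    Fetches a url via urllib2, or via google.appengine.api.urlfetch when
    running on GAE (following redirects manually and carrying cookies
    across requests). A usage sketch (the URLs are placeholders)::

        html = fetch('http://www.example.com')                          # GET
        html = fetch('http://www.example.com/', data=dict(q='web2py'))  # POST
    """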
headers = headers or {}
if not data is None:
data = urllib.urlencode(data)
if user_agent:
headers['User-agent'] = user_agent
headers['Cookie'] = ' '.join(
['%s=%s;' % (c.key, c.value) for c in cookie.values()])
try:
from google.appengine.api import urlfetch
except ImportError:
req = urllib2.Request(url, data, headers)
html = urllib2.urlopen(req).read()
else:
method = ((data is None) and urlfetch.GET) or urlfetch.POST
while url is not None:
response = urlfetch.fetch(url=url, payload=data,
method=method, headers=headers,
allow_truncated=False, follow_redirects=False,
deadline=10)
# next request will be a get, so no need to send the data again
data = None
method = urlfetch.GET
# load cookies from the response
cookie.load(response.headers.get('set-cookie', ''))
url = response.headers.get('location')
html = response.content
return html
regex_geocode = \
re.compile(r"""<geometry>[\W]*?<location>[\W]*?<lat>(?P<la>[^<]*)</lat>[\W]*?<lng>(?P<lo>[^<]*)</lng>[\W]*?</location>""")
def geocode(address):
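    """
    Tries to resolve an address into a (latitude, longitude) tuple using
    the Google geocoding API; returns (0.0, 0.0) on any failure.
    """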
try:
a = urllib.quote(address)
txt = fetch('http://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=%s'
% a)
item = regex_geocode.search(txt)
(la, lo) = (float(item.group('la')), float(item.group('lo')))
return (la, lo)
except:
return (0.0, 0.0)
def reverse_geocode(lat, lng, lang=None):
""" Try to get an approximate address for a given latitude, longitude. """
if not lang:
lang = current.T.accepted_language
try:
return json_parser.loads(fetch('http://maps.googleapis.com/maps/api/geocode/json?latlng=%(lat)s,%(lng)s&language=%(lang)s' % locals()))['results'][0]['formatted_address']
except:
return ''
def universal_caller(f, *a, **b):
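    """
    Calls f with the positional values in a and with those keyword values
    in b that match f's argument names; raises HTTP 404 when a required
    positional argument cannot be filled. Used by Service to map request
    variables onto the decorated functions.
    """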
c = f.func_code.co_argcount
n = f.func_code.co_varnames[:c]
defaults = f.func_defaults or []
    # Note: slice with an explicit index; n[0:-len(defaults)] would be
    # empty when the function has no defaults (since -0 == 0).
    pos_args = n[0:len(n) - len(defaults)]
    named_args = n[len(n) - len(defaults):]
arg_dict = {}
# Fill the arg_dict with name and value for the submitted, positional values
for pos_index, pos_val in enumerate(a[:c]):
arg_dict[n[pos_index]] = pos_val # n[pos_index] is the name of the argument
    # There might be pos_args left that were sent as named values. Gather them as well.
    # If an argument is already populated we simply replace it.
for arg_name in pos_args[len(arg_dict):]:
if arg_name in b:
arg_dict[arg_name] = b[arg_name]
if len(arg_dict) >= len(pos_args):
        # All the positional arguments are found. The function may now be called.
        # However, we need to update the arg_dict with the values from the named arguments as well.
for arg_name in named_args:
if arg_name in b:
arg_dict[arg_name] = b[arg_name]
return f(**arg_dict)
# Raise an error, the function cannot be called.
raise HTTP(404, "Object does not exist")
class Service(object):
def __init__(self, environment=None):
self.run_procedures = {}
self.csv_procedures = {}
self.xml_procedures = {}
self.rss_procedures = {}
self.json_procedures = {}
self.jsonrpc_procedures = {}
self.jsonrpc2_procedures = {}
self.xmlrpc_procedures = {}
self.amfrpc_procedures = {}
self.amfrpc3_procedures = {}
self.soap_procedures = {}
def run(self, f):
"""
Example:
Use as::
service = Service()
@service.run
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/run/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def csv(self, f):
"""
Example:
Use as::
service = Service()
@service.csv
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/csv/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def xml(self, f):
"""
Example:
Use as::
service = Service()
@service.xml
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/xml/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def rss(self, f):
"""
Example:
Use as::
service = Service()
@service.rss
def myfunction():
return dict(title=..., link=..., description=...,
created_on=..., entries=[dict(title=..., link=...,
description=..., created_on=...])
def call():
return service()
        Then call it with::
wget http://..../app/default/call/rss/myfunction
"""
self.rss_procedures[f.__name__] = f
return f
def json(self, f):
"""
Example:
Use as::
service = Service()
@service.json
def myfunction(a, b):
return [{a: b}]
def call():
return service()
        Then call it with::
wget http://..../app/default/call/json/myfunction?a=hello&b=world
"""
self.json_procedures[f.__name__] = f
return f
def jsonrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.jsonrpc
def myfunction(a, b):
return a + b
def call():
return service()
        Then call it with::
wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world
"""
self.jsonrpc_procedures[f.__name__] = f
return f
def jsonrpc2(self, f):
"""
Example:
Use as::
service = Service()
@service.jsonrpc2
def myfunction(a, b):
return a + b
def call():
return service()
        Then call it with::
wget --post-data '{"jsonrpc": "2.0", "id": 1, "method": "myfunction", "params": {"a": 1, "b": 2}}' http://..../app/default/call/jsonrpc2
"""
self.jsonrpc2_procedures[f.__name__] = f
return f
def xmlrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.xmlrpc
def myfunction(a, b):
return a + b
def call():
return service()
        Then call it with::
wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world
"""
self.xmlrpc_procedures[f.__name__] = f
return f
def amfrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.amfrpc
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world
"""
self.amfrpc_procedures[f.__name__] = f
return f
def amfrpc3(self, domain='default'):
"""
Example:
Use as::
service = Service()
@service.amfrpc3('domain')
def myfunction(a, b):
return a + b
def call():
return service()
        Then call it with::
wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world
"""
if not isinstance(domain, str):
raise SyntaxError("AMF3 requires a domain for function")
def _amfrpc3(f):
if domain:
self.amfrpc3_procedures[domain + '.' + f.__name__] = f
else:
self.amfrpc3_procedures[f.__name__] = f
return f
return _amfrpc3
def soap(self, name=None, returns=None, args=None, doc=None):
"""
Example:
Use as::
service = Service()
@service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,})
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
from gluon.contrib.pysimplesoap.client import SoapClient
client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL")
response = client.MyFunction(a=1,b=2)
return response['result']
It also exposes online generated documentation and xml example messages
at `http://..../app/default/call/soap`
"""
def _soap(f):
self.soap_procedures[name or f.__name__] = f, returns, args, doc
return f
return _soap
def serve_run(self, args=None):
request = current.request
if not args:
args = request.args
if args and args[0] in self.run_procedures:
return str(universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars)))
self.error()
def serve_csv(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/x-csv'
if not args:
args = request.args
def none_exception(value):
if isinstance(value, unicode):
return value.encode('utf8')
if hasattr(value, 'isoformat'):
return value.isoformat()[:19].replace('T', ' ')
if value is None:
return '<NULL>'
return value
if args and args[0] in self.run_procedures:
import types
r = universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars))
s = cStringIO.StringIO()
if hasattr(r, 'export_to_csv_file'):
r.export_to_csv_file(s)
elif r and not isinstance(r, types.GeneratorType) and isinstance(r[0], (dict, Storage)):
import csv
writer = csv.writer(s)
writer.writerow(r[0].keys())
for line in r:
writer.writerow([none_exception(v)
for v in line.values()])
else:
import csv
writer = csv.writer(s)
for line in r:
writer.writerow(line)
return s.getvalue()
self.error()
def serve_xml(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/xml'
if not args:
args = request.args
if args and args[0] in self.run_procedures:
s = universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars))
if hasattr(s, 'as_list'):
s = s.as_list()
return serializers.xml(s, quote=False)
self.error()
def serve_rss(self, args=None):
request = current.request
response = current.response
if not args:
args = request.args
if args and args[0] in self.rss_procedures:
feed = universal_caller(self.rss_procedures[args[0]],
*args[1:], **dict(request.vars))
else:
self.error()
response.headers['Content-Type'] = 'application/rss+xml'
return serializers.rss(feed)
def serve_json(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
if not args:
args = request.args
d = dict(request.vars)
if args and args[0] in self.json_procedures:
s = universal_caller(self.json_procedures[args[0]], *args[1:], **d)
if hasattr(s, 'as_list'):
s = s.as_list()
return response.json(s)
self.error()
class JsonRpcException(Exception):
def __init__(self, code, info):
jrpc_error = Service.jsonrpc_errors.get(code)
if jrpc_error:
self.message, self.description = jrpc_error
self.code, self.info = code, info
# jsonrpc 2.0 error types. records the following structure {code: (message,meaning)}
jsonrpc_errors = {
-32700: ("Parse error. Invalid JSON was received by the server.", "An error occurred on the server while parsing the JSON text."),
-32600: ("Invalid Request", "The JSON sent is not a valid Request object."),
-32601: ("Method not found", "The method does not exist / is not available."),
-32602: ("Invalid params", "Invalid method parameter(s)."),
-32603: ("Internal error", "Internal JSON-RPC error."),
-32099: ("Server error", "Reserved for implementation-defined server-errors.")}
def serve_jsonrpc(self):
def return_response(id, result):
return serializers.json({'version': '1.1',
'id': id, 'result': result, 'error': None})
def return_error(id, code, message, data=None):
error = {'name': 'JSONRPCError',
'code': code, 'message': message}
if data is not None:
error['data'] = data
return serializers.json({'id': id,
'version': '1.1',
'error': error,
})
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
methods = self.jsonrpc_procedures
data = json_parser.loads(request.body.read())
jsonrpc_2 = data.get('jsonrpc')
if jsonrpc_2: #hand over to version 2 of the protocol
return self.serve_jsonrpc2(data)
id, method, params = data.get('id'), data.get('method'), data.get('params', [])
if id is None:
return return_error(0, 100, 'missing id')
if not method in methods:
return return_error(id, 100, 'method "%s" does not exist' % method)
try:
if isinstance(params, dict):
s = methods[method](**params)
else:
s = methods[method](*params)
if hasattr(s, 'as_list'):
s = s.as_list()
return return_response(id, s)
except Service.JsonRpcException, e:
return return_error(id, e.code, e.info)
except:
etype, eval, etb = sys.exc_info()
message = '%s: %s' % (etype.__name__, eval)
data = request.is_local and traceback.format_tb(etb)
logger.warning('jsonrpc exception %s\n%s' % (message, traceback.format_tb(etb)))
return return_error(id, 100, message, data)
def serve_jsonrpc2(self, data=None, batch_element=False):
def return_response(id, result):
if not must_respond:
return None
return serializers.json({'jsonrpc': '2.0',
'id': id, 'result': result})
def return_error(id, code, message=None, data=None):
error = {'code': code}
if Service.jsonrpc_errors.has_key(code):
error['message'] = Service.jsonrpc_errors[code][0]
error['data'] = Service.jsonrpc_errors[code][1]
if message is not None:
error['message'] = message
if data is not None:
error['data'] = data
return serializers.json({'jsonrpc': '2.0',
'id': id,
'error': error})
def validate(data):
"""
Validate request as defined in: http://www.jsonrpc.org/specification#request_object.
Args:
data(str): The json object.
Returns:
- True -- if successful
- False -- if no error should be reported (i.e. data is missing 'id' member)
Raises:
JsonRPCException
"""
iparms = set(data.keys())
mandatory_args = set(['jsonrpc', 'method'])
missing_args = mandatory_args - iparms
if missing_args:
raise Service.JsonRpcException(-32600, 'Missing arguments %s.' % list(missing_args))
if data['jsonrpc'] != '2.0':
raise Service.JsonRpcException(-32603, 'Unsupported jsonrpc version "%s"' % data['jsonrpc'])
if 'id' not in iparms:
return False
return True
request = current.request
response = current.response
if not data:
response.headers['Content-Type'] = 'application/json; charset=utf-8'
try:
data = json_parser.loads(request.body.read())
except ValueError: # decoding error in json lib
return return_error(None, -32700)
# Batch handling
if isinstance(data, list) and not batch_element:
retlist = []
for c in data:
retstr = self.serve_jsonrpc2(c, batch_element=True)
if retstr: # do not add empty responses
retlist.append(retstr)
if len(retlist) == 0: # return nothing
return ''
else:
return "[" + ','.join(retlist) + "]"
methods = self.jsonrpc2_procedures
methods.update(self.jsonrpc_procedures)
try:
must_respond = validate(data)
except Service.JsonRpcException, e:
return return_error(None, e.code, e.info)
id, method, params = data.get('id'), data['method'], data.get('params', '')
if not method in methods:
return return_error(id, -32601, data='Method "%s" does not exist' % method)
try:
if isinstance(params, dict):
s = methods[method](**params)
else:
s = methods[method](*params)
if hasattr(s, 'as_list'):
s = s.as_list()
if must_respond:
return return_response(id, s)
else:
return ''
except HTTP, e:
raise e
except Service.JsonRpcException, e:
return return_error(id, e.code, e.info)
except:
etype, eval, etb = sys.exc_info()
data = '%s: %s\n' % (etype.__name__, eval) + str(request.is_local and traceback.format_tb(etb))
logger.warning('%s: %s\n%s' % (etype.__name__, eval, traceback.format_tb(etb)))
return return_error(id, -32099, data=data)
def serve_xmlrpc(self):
request = current.request
response = current.response
services = self.xmlrpc_procedures.values()
return response.xmlrpc(request, services)
def serve_amfrpc(self, version=0):
try:
import pyamf
import pyamf.remoting.gateway
except:
return "pyamf not installed or not in Python sys.path"
request = current.request
response = current.response
if version == 3:
services = self.amfrpc3_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
pyamf_request = pyamf.remoting.decode(request.body)
else:
services = self.amfrpc_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
context = pyamf.get_context(pyamf.AMF0)
pyamf_request = pyamf.remoting.decode(request.body, context)
pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion)
for name, message in pyamf_request:
pyamf_response[name] = base_gateway.getProcessor(message)(message)
response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE
if version == 3:
return pyamf.remoting.encode(pyamf_response).getvalue()
else:
return pyamf.remoting.encode(pyamf_response, context).getvalue()
def serve_soap(self, version="1.1"):
try:
from gluon.contrib.pysimplesoap.server import SoapDispatcher
except:
return "pysimplesoap not installed in contrib"
request = current.request
response = current.response
procedures = self.soap_procedures
location = "%s://%s%s" % (
request.env.wsgi_url_scheme,
request.env.http_host,
URL(r=request, f="call/soap", vars={}))
namespace = 'namespace' in response and response.namespace or location
documentation = response.description or ''
dispatcher = SoapDispatcher(
name=response.title,
location=location,
action=location, # SOAPAction
namespace=namespace,
prefix='pys',
documentation=documentation,
ns=True)
for method, (function, returns, args, doc) in procedures.iteritems():
dispatcher.register_function(method, function, returns, args, doc)
if request.env.request_method == 'POST':
fault = {}
# Process normal Soap Operation
response.headers['Content-Type'] = 'text/xml'
xml = dispatcher.dispatch(request.body.read(), fault=fault)
if fault:
# May want to consider populating a ticket here...
response.status = 500
# return the soap response
return xml
elif 'WSDL' in request.vars:
# Return Web Service Description
response.headers['Content-Type'] = 'text/xml'
return dispatcher.wsdl()
elif 'op' in request.vars:
# Return method help webpage
response.headers['Content-Type'] = 'text/html'
method = request.vars['op']
sample_req_xml, sample_res_xml, doc = dispatcher.help(method)
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
A("See all webservice operations",
_href=URL(r=request, f="call/soap", vars={})),
H2(method),
P(doc),
UL(LI("Location: %s" % dispatcher.location),
LI("Namespace: %s" % dispatcher.namespace),
LI("SoapAction: %s" % dispatcher.action),
),
H3("Sample SOAP XML Request Message:"),
CODE(sample_req_xml, language="xml"),
H3("Sample SOAP XML Response Message:"),
CODE(sample_res_xml, language="xml"),
]
return {'body': body}
else:
# Return general help and method list webpage
response.headers['Content-Type'] = 'text/html'
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
P(response.description),
P("The following operations are available"),
A("See WSDL for webservice description",
_href=URL(r=request, f="call/soap", vars={"WSDL":None})),
UL([LI(A("%s: %s" % (method, doc or ''),
_href=URL(r=request, f="call/soap", vars={'op': method})))
for method, doc in dispatcher.list_methods()]),
]
return {'body': body}
def __call__(self):
"""
Registers services with::
service = Service()
@service.run
@service.rss
@service.json
@service.jsonrpc
@service.xmlrpc
@service.amfrpc
@service.amfrpc3('domain')
@service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,})
Exposes services with::
def call():
return service()
You can call services with::
http://..../app/default/call/run?[parameters]
http://..../app/default/call/rss?[parameters]
http://..../app/default/call/json?[parameters]
http://..../app/default/call/jsonrpc
http://..../app/default/call/xmlrpc
http://..../app/default/call/amfrpc
http://..../app/default/call/amfrpc3
http://..../app/default/call/soap
"""
request = current.request
if len(request.args) < 1:
raise HTTP(404, "Not Found")
arg0 = request.args(0)
if arg0 == 'run':
return self.serve_run(request.args[1:])
elif arg0 == 'rss':
return self.serve_rss(request.args[1:])
elif arg0 == 'csv':
return self.serve_csv(request.args[1:])
elif arg0 == 'xml':
return self.serve_xml(request.args[1:])
elif arg0 == 'json':
return self.serve_json(request.args[1:])
elif arg0 == 'jsonrpc':
return self.serve_jsonrpc()
elif arg0 == 'jsonrpc2':
return self.serve_jsonrpc2()
elif arg0 == 'xmlrpc':
return self.serve_xmlrpc()
elif arg0 == 'amfrpc':
return self.serve_amfrpc()
elif arg0 == 'amfrpc3':
return self.serve_amfrpc(3)
elif arg0 == 'soap':
return self.serve_soap()
else:
self.error()
def error(self):
raise HTTP(404, "Object does not exist")
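# Usage sketch (illustrative, mirroring the decorator docstrings above): in a
# controller, register functions once and expose them through a single action:
#
#   service = Service()
#
#   @service.jsonrpc
#   @service.jsonrpc2
#   def add(a, b):
#       return int(a) + int(b)
#
#   def call():
#       return service()
#
# A JSON-RPC 2.0 client then POSTs a payload such as
#   {"jsonrpc": "2.0", "id": 1, "method": "add", "params": [3, 4]}
# to http://..../app/default/call/jsonrpc2 and receives
#   {"jsonrpc": "2.0", "id": 1, "result": 7}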
def completion(callback):
"""
Executes a task on completion of the called action.
Example:
Use as::
from gluon.tools import completion
@completion(lambda d: logging.info(repr(d)))
def index():
return dict(message='hello')
    It logs the output of the decorated function every time it is called.
The argument of completion is executed in a new thread.
"""
def _completion(f):
def __completion(*a, **b):
d = None
try:
d = f(*a, **b)
return d
finally:
thread.start_new_thread(callback, (d,))
return __completion
return _completion
def prettydate(d, T=lambda x: x):
if isinstance(d, datetime.datetime):
dt = datetime.datetime.now() - d
elif isinstance(d, datetime.date):
dt = datetime.date.today() - d
elif not d:
return ''
else:
return '[invalid date]'
if dt.days < 0:
suffix = ' from now'
dt = -dt
else:
suffix = ' ago'
if dt.days >= 2 * 365:
return T('%d years' + suffix) % int(dt.days / 365)
elif dt.days >= 365:
return T('1 year' + suffix)
elif dt.days >= 60:
return T('%d months' + suffix) % int(dt.days / 30)
elif dt.days > 21:
return T('1 month' + suffix)
elif dt.days >= 14:
return T('%d weeks' + suffix) % int(dt.days / 7)
elif dt.days >= 7:
return T('1 week' + suffix)
elif dt.days > 1:
return T('%d days' + suffix) % dt.days
elif dt.days == 1:
return T('1 day' + suffix)
elif dt.seconds >= 2 * 60 * 60:
return T('%d hours' + suffix) % int(dt.seconds / 3600)
elif dt.seconds >= 60 * 60:
return T('1 hour' + suffix)
elif dt.seconds >= 2 * 60:
return T('%d minutes' + suffix) % int(dt.seconds / 60)
elif dt.seconds >= 60:
return T('1 minute' + suffix)
elif dt.seconds > 1:
return T('%d seconds' + suffix) % dt.seconds
elif dt.seconds == 1:
return T('1 second' + suffix)
else:
return T('now')
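# Illustrative examples (added) of prettydate() output relative to "now":
#
#   prettydate(datetime.datetime.now())                                 # 'now'
#   prettydate(datetime.datetime.now() - datetime.timedelta(hours=3))   # '3 hours ago'
#   prettydate(datetime.date.today() - datetime.timedelta(days=8))      # '1 week ago'
#   prettydate(datetime.date.today() + datetime.timedelta(days=3))      # '3 days from now'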
def test_thread_separation():
def f():
c = PluginManager()
lock1.acquire()
lock2.acquire()
c.x = 7
lock1.release()
lock2.release()
lock1 = thread.allocate_lock()
lock2 = thread.allocate_lock()
lock1.acquire()
thread.start_new_thread(f, ())
a = PluginManager()
a.x = 5
lock1.release()
lock2.acquire()
return a.x
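# Note (added for clarity): the function above returns 5 because PluginManager
# is a per-thread singleton; the worker thread's c.x = 7 lands on a different
# instance, so the main thread still sees a.x == 5 (see the doctests below).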
class PluginManager(object):
"""
Plugin Manager is similar to a storage object but it is a single level
singleton. This means that multiple instances within the same thread share
the same attributes.
Its constructor is also special. The first argument is the name of the
plugin you are defining.
The named arguments are parameters needed by the plugin with default values.
    If the parameters were previously defined, the old values are used.
Example:
in some general configuration file::
plugins = PluginManager()
plugins.me.param1=3
within the plugin model::
_ = PluginManager('me',param1=5,param2=6,param3=7)
where the plugin is used::
>>> print plugins.me.param1
3
>>> print plugins.me.param2
6
>>> plugins.me.param3 = 8
>>> print plugins.me.param3
8
Here are some tests::
>>> a=PluginManager()
>>> a.x=6
>>> b=PluginManager('check')
>>> print b.x
6
>>> b=PluginManager() # reset settings
>>> print b.x
<Storage {}>
>>> b.x=7
>>> print a.x
7
>>> a.y.z=8
>>> print b.y.z
8
>>> test_thread_separation()
5
>>> plugins=PluginManager('me',db='mydb')
>>> print plugins.me.db
mydb
>>> print 'me' in plugins
True
>>> print plugins.me.installed
True
"""
instances = {}
def __new__(cls, *a, **b):
id = thread.get_ident()
lock = thread.allocate_lock()
try:
lock.acquire()
try:
return cls.instances[id]
except KeyError:
instance = object.__new__(cls, *a, **b)
cls.instances[id] = instance
return instance
finally:
lock.release()
def __init__(self, plugin=None, **defaults):
if not plugin:
self.__dict__.clear()
settings = self.__getattr__(plugin)
settings.installed = True
settings.update(
(k, v) for k, v in defaults.items() if not k in settings)
def __getattr__(self, key):
if not key in self.__dict__:
self.__dict__[key] = Storage()
return self.__dict__[key]
def keys(self):
return self.__dict__.keys()
def __contains__(self, key):
return key in self.__dict__
class Expose(object):
def __init__(self, base=None, basename=None, extensions=None, allow_download=True):
"""
Examples:
Use as::
def static():
return dict(files=Expose())
or::
def static():
path = os.path.join(request.folder,'static','public')
return dict(files=Expose(path,basename='public'))
Args:
extensions: an optional list of file extensions for filtering
displayed files: e.g. `['.py', '.jpg']`
allow_download: whether to allow downloading selected files
"""
current.session.forget()
base = base or os.path.join(current.request.folder, 'static')
basename = basename or current.request.function
self.basename = basename
if current.request.raw_args:
self.args = [arg for arg in current.request.raw_args.split('/') if arg]
else:
self.args = [arg for arg in current.request.args if arg]
filename = os.path.join(base, *self.args)
if not os.path.exists(filename):
raise HTTP(404, "FILE NOT FOUND")
if not os.path.normpath(filename).startswith(base):
raise HTTP(401, "NOT AUTHORIZED")
if allow_download and not os.path.isdir(filename):
current.response.headers['Content-Type'] = contenttype(filename)
raise HTTP(200, open(filename, 'rb'), **current.response.headers)
self.path = path = os.path.join(filename, '*')
self.folders = [f[len(path) - 1:] for f in sorted(glob.glob(path))
if os.path.isdir(f) and not self.isprivate(f)]
self.filenames = [f[len(path) - 1:] for f in sorted(glob.glob(path))
if not os.path.isdir(f) and not self.isprivate(f)]
if 'README' in self.filenames:
readme = open(os.path.join(filename, 'README')).read()
self.paragraph = MARKMIN(readme)
else:
self.paragraph = None
if extensions:
self.filenames = [f for f in self.filenames
if os.path.splitext(f)[-1] in extensions]
def breadcrumbs(self, basename):
path = []
span = SPAN()
span.append(A(basename, _href=URL()))
for arg in self.args:
span.append('/')
path.append(arg)
span.append(A(arg, _href=URL(args='/'.join(path))))
return span
def table_folders(self):
if self.folders:
return SPAN(H3('Folders'), TABLE(
*[TR(TD(A(folder, _href=URL(args=self.args + [folder]))))
for folder in self.folders],
**dict(_class="table")))
return ''
@staticmethod
def isprivate(f):
return 'private' in f or f.startswith('.') or f.endswith('~')
@staticmethod
def isimage(f):
return os.path.splitext(f)[-1].lower() in (
'.png', '.jpg', '.jpeg', '.gif', '.tiff')
def table_files(self, width=160):
if self.filenames:
return SPAN(H3('Files'),
TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))),
TD(IMG(_src=URL(args=self.args + [f]),
_style='max-width:%spx' % width)
if width and self.isimage(f) else ''))
for f in self.filenames],
**dict(_class="table")))
return ''
def xml(self):
return DIV(
H2(self.breadcrumbs(self.basename)),
self.paragraph or '',
self.table_folders(),
self.table_files()).xml()
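# Controller sketch (illustrative; the folder layout is an assumption): expose
# a browsable, download-enabled listing of app/static/public, showing PDFs only:
#
#   def downloads():
#       path = os.path.join(current.request.folder, 'static', 'public')
#       return dict(files=Expose(path, basename='public', extensions=['.pdf']))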
class Wiki(object):
everybody = 'everybody'
rows_page = 25
def markmin_base(self, body):
return MARKMIN(body, extra=self.settings.extra,
url=True, environment=self.env,
autolinks=lambda link: expand_one(link, {})).xml()
def render_tags(self, tags):
return DIV(
_class='w2p_wiki_tags',
*[A(t.strip(), _href=URL(args='_search', vars=dict(q=t)))
for t in tags or [] if t.strip()])
def markmin_render(self, page):
return self.markmin_base(page.body) + self.render_tags(page.tags).xml()
def html_render(self, page):
html = page.body
# @///function -> http://..../function
html = replace_at_urls(html, URL)
# http://...jpg -> <img src="http://...jpg/> or embed
html = replace_autolinks(html, lambda link: expand_one(link, {}))
# @{component:name} -> <script>embed component name</script>
html = replace_components(html, self.env)
html = html + self.render_tags(page.tags).xml()
return html
@staticmethod
def component(text):
"""
In wiki docs allows `@{component:controller/function/args}`
which renders as a `LOAD(..., ajax=True)`
"""
items = text.split('/')
controller, function, args = items[0], items[1], items[2:]
return LOAD(controller, function, args=args, ajax=True).xml()
def get_renderer(self):
if isinstance(self.settings.render, basestring):
r = getattr(self, "%s_render" % self.settings.render)
elif callable(self.settings.render):
r = self.settings.render
elif isinstance(self.settings.render, dict):
def custom_render(page):
if page.render:
if page.render in self.settings.render.keys():
my_render = self.settings.render[page.render]
else:
my_render = getattr(self, "%s_render" % page.render)
else:
my_render = self.markmin_render
return my_render(page)
r = custom_render
else:
raise ValueError(
"Invalid render type %s" % type(self.settings.render))
return r
def __init__(self, auth, env=None, render='markmin',
manage_permissions=False, force_prefix='',
restrict_search=False, extra=None,
menu_groups=None, templates=None, migrate=True,
controller=None, function=None, groups=None):
settings = self.settings = auth.settings.wiki
"""
Args:
render:
- "markmin"
- "html"
- `<function>` : Sets a custom render function
- `dict(html=<function>, markmin=...)`: dict(...) allows
multiple custom render functions
- "multiple" : Is the same as `{}`. It enables per-record
formats using builtins
"""
engines = set(['markmin', 'html'])
show_engine = False
if render == "multiple":
render = {}
if isinstance(render, dict):
[engines.add(key) for key in render]
show_engine = True
settings.render = render
perms = settings.manage_permissions = manage_permissions
settings.force_prefix = force_prefix
settings.restrict_search = restrict_search
settings.extra = extra or {}
settings.menu_groups = menu_groups
settings.templates = templates
settings.controller = controller
settings.function = function
settings.groups = auth.user_groups.values() \
if groups is None else groups
db = auth.db
self.env = env or {}
self.env['component'] = Wiki.component
self.auth = auth
self.wiki_menu_items = None
if self.auth.user:
self.settings.force_prefix = force_prefix % self.auth.user
else:
self.settings.force_prefix = force_prefix
self.host = current.request.env.http_host
table_definitions = [
('wiki_page', {
'args': [
Field('slug',
requires=[IS_SLUG(),
IS_NOT_IN_DB(db, 'wiki_page.slug')],
writable=False),
Field('title', length=255, unique=True),
Field('body', 'text', notnull=True),
Field('tags', 'list:string'),
Field('can_read', 'list:string',
writable=perms,
readable=perms,
default=[Wiki.everybody]),
Field('can_edit', 'list:string',
writable=perms, readable=perms,
default=[Wiki.everybody]),
Field('changelog'),
Field('html', 'text',
compute=self.get_renderer(),
readable=False, writable=False),
Field('render', default="markmin",
readable=show_engine,
writable=show_engine,
requires=IS_EMPTY_OR(
IS_IN_SET(engines))),
auth.signature],
'vars': {'format': '%(title)s', 'migrate': migrate}}),
('wiki_tag', {
'args': [
Field('name'),
Field('wiki_page', 'reference wiki_page'),
auth.signature],
'vars':{'format': '%(title)s', 'migrate': migrate}}),
('wiki_media', {
'args': [
Field('wiki_page', 'reference wiki_page'),
Field('title', required=True),
Field('filename', 'upload', required=True),
auth.signature],
'vars': {'format': '%(title)s', 'migrate': migrate}}),
]
# define only non-existent tables
for key, value in table_definitions:
args = []
if not key in db.tables():
# look for wiki_ extra fields in auth.settings
extra_fields = auth.settings.extra_fields
if extra_fields:
if key in extra_fields:
if extra_fields[key]:
for field in extra_fields[key]:
args.append(field)
args += value['args']
db.define_table(key, *args, **value['vars'])
if self.settings.templates is None and not \
self.settings.manage_permissions:
self.settings.templates = db.wiki_page.tags.contains('template') & \
db.wiki_page.can_read.contains('everybody')
def update_tags_insert(page, id, db=db):
for tag in page.tags or []:
tag = tag.strip().lower()
if tag:
db.wiki_tag.insert(name=tag, wiki_page=id)
def update_tags_update(dbset, page, db=db):
page = dbset.select(limitby=(0, 1)).first()
db(db.wiki_tag.wiki_page == page.id).delete()
for tag in page.tags or []:
tag = tag.strip().lower()
if tag:
db.wiki_tag.insert(name=tag, wiki_page=page.id)
db.wiki_page._after_insert.append(update_tags_insert)
db.wiki_page._after_update.append(update_tags_update)
if (auth.user and
check_credentials(current.request, gae_login=False) and
not 'wiki_editor' in auth.user_groups.values() and
self.settings.groups == auth.user_groups.values()):
group = db.auth_group(role='wiki_editor')
gid = group.id if group else db.auth_group.insert(
role='wiki_editor')
auth.add_membership(gid)
settings.lock_keys = True
# WIKI ACCESS POLICY
def not_authorized(self, page=None):
raise HTTP(401)
def can_read(self, page):
if 'everybody' in page.can_read or not \
self.settings.manage_permissions:
return True
elif self.auth.user:
groups = self.settings.groups
if ('wiki_editor' in groups or
set(groups).intersection(set(page.can_read + page.can_edit)) or
page.created_by == self.auth.user.id):
return True
return False
def can_edit(self, page=None):
if not self.auth.user:
redirect(self.auth.settings.login_url)
groups = self.settings.groups
return ('wiki_editor' in groups or
(page is None and 'wiki_author' in groups) or
not page is None and (
set(groups).intersection(set(page.can_edit)) or
page.created_by == self.auth.user.id))
def can_manage(self):
if not self.auth.user:
return False
groups = self.settings.groups
return 'wiki_editor' in groups
def can_search(self):
return True
def can_see_menu(self):
if self.auth.user:
if self.settings.menu_groups is None:
return True
else:
groups = self.settings.groups
if any(t in self.settings.menu_groups for t in groups):
return True
return False
### END POLICY
def automenu(self):
"""adds the menu if not present"""
if (not self.wiki_menu_items and
self.settings.controller and
self.settings.function):
self.wiki_menu_items = self.menu(self.settings.controller,
self.settings.function)
current.response.menu += self.wiki_menu_items
def __call__(self):
request = current.request
settings = self.settings
settings.controller = settings.controller or request.controller
settings.function = settings.function or request.function
self.automenu()
zero = request.args(0) or 'index'
if zero and zero.isdigit():
return self.media(int(zero))
elif not zero or not zero.startswith('_'):
return self.read(zero)
elif zero == '_edit':
return self.edit(request.args(1) or 'index', request.args(2) or 0)
elif zero == '_editmedia':
return self.editmedia(request.args(1) or 'index')
elif zero == '_create':
return self.create()
elif zero == '_pages':
return self.pages()
elif zero == '_search':
return self.search()
elif zero == '_recent':
ipage = int(request.vars.page or 0)
query = self.auth.db.wiki_page.created_by == request.args(
1, cast=int)
return self.search(query=query,
orderby=~self.auth.db.wiki_page.created_on,
limitby=(ipage * self.rows_page,
(ipage + 1) * self.rows_page),
)
elif zero == '_cloud':
return self.cloud()
elif zero == '_preview':
return self.preview(self.get_renderer())
def first_paragraph(self, page):
if not self.can_read(page):
mm = (page.body or '').replace('\r', '')
ps = [p for p in mm.split('\n\n')
if not p.startswith('#') and p.strip()]
if ps:
return ps[0]
return ''
def fix_hostname(self, body):
return (body or '').replace('://HOSTNAME', '://%s' % self.host)
def read(self, slug, force_render=False):
        if slug == '_cloud':
            return self.cloud()
        elif slug == '_search':
            return self.search()
page = self.auth.db.wiki_page(slug=slug)
if page and (not self.can_read(page)):
return self.not_authorized(page)
if current.request.extension == 'html':
if not page:
url = URL(args=('_create', slug))
return dict(content=A('Create page "%s"' % slug, _href=url, _class="btn"))
else:
html = page.html if not force_render else self.get_renderer()(page)
content = XML(self.fix_hostname(html))
return dict(title=page.title,
slug=page.slug,
page=page,
content=content,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
elif current.request.extension == 'load':
return self.fix_hostname(page.html) if page else ''
else:
if not page:
raise HTTP(404)
else:
return dict(title=page.title,
slug=page.slug,
page=page,
content=page.body,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
def edit(self, slug, from_template=0):
auth = self.auth
db = auth.db
page = db.wiki_page(slug=slug)
if not self.can_edit(page):
return self.not_authorized(page)
title_guess = ' '.join(c.capitalize() for c in slug.split('-'))
if not page:
if not (self.can_manage() or
slug.startswith(self.settings.force_prefix)):
current.session.flash = 'slug must have "%s" prefix' \
% self.settings.force_prefix
redirect(URL(args=('_create')))
db.wiki_page.can_read.default = [Wiki.everybody]
db.wiki_page.can_edit.default = [auth.user_group_role()]
db.wiki_page.title.default = title_guess
db.wiki_page.slug.default = slug
if slug == 'wiki-menu':
db.wiki_page.body.default = \
'- Menu Item > @////index\n- - Submenu > http://web2py.com'
else:
db.wiki_page.body.default = db(db.wiki_page.id == from_template).select(db.wiki_page.body)[0].body \
if int(from_template) > 0 else '## %s\n\npage content' % title_guess
vars = current.request.post_vars
if vars.body:
vars.body = vars.body.replace('://%s' % self.host, '://HOSTNAME')
form = SQLFORM(db.wiki_page, page, deletable=True,
formstyle='table2cols', showid=False).process()
if form.deleted:
current.session.flash = 'page deleted'
redirect(URL())
elif form.accepted:
current.session.flash = 'page created'
redirect(URL(args=slug))
script = """
jQuery(function() {
if (!jQuery('#wiki_page_body').length) return;
var pagecontent = jQuery('#wiki_page_body');
pagecontent.css('font-family',
'Monaco,Menlo,Consolas,"Courier New",monospace');
var prevbutton = jQuery('<button class="btn nopreview">Preview</button>');
var preview = jQuery('<div id="preview"></div>').hide();
var previewmedia = jQuery('<div id="previewmedia"></div>');
var form = pagecontent.closest('form');
preview.insertBefore(form);
prevbutton.insertBefore(form);
if(%(link_media)s) {
var mediabutton = jQuery('<button class="btn nopreview">Media</button>');
mediabutton.insertBefore(form);
previewmedia.insertBefore(form);
mediabutton.click(function() {
if (mediabutton.hasClass('nopreview')) {
web2py_component('%(urlmedia)s', 'previewmedia');
} else {
previewmedia.empty();
}
mediabutton.toggleClass('nopreview');
});
}
prevbutton.click(function(e) {
e.preventDefault();
if (prevbutton.hasClass('nopreview')) {
prevbutton.addClass('preview').removeClass(
'nopreview').html('Edit Source');
try{var wiki_render = jQuery('#wiki_page_render').val()}
catch(e){var wiki_render = null;}
web2py_ajax_page('post', \
'%(url)s', {body: jQuery('#wiki_page_body').val(), \
render: wiki_render}, 'preview');
form.fadeOut('fast', function() {preview.fadeIn()});
} else {
prevbutton.addClass(
'nopreview').removeClass('preview').html('Preview');
preview.fadeOut('fast', function() {form.fadeIn()});
}
})
})
""" % dict(url=URL(args=('_preview', slug)), link_media=('true' if page else 'false'),
urlmedia=URL(extension='load',
args=('_editmedia', slug),
vars=dict(embedded=1)))
return dict(content=TAG[''](form, SCRIPT(script)))
def editmedia(self, slug):
auth = self.auth
db = auth.db
page = db.wiki_page(slug=slug)
if not (page and self.can_edit(page)):
return self.not_authorized(page)
self.auth.db.wiki_media.id.represent = lambda id, row: \
id if not row.filename else \
SPAN('@////%i/%s.%s' % (id, IS_SLUG.urlify(row.title.split('.')[0]), row.filename.split('.')[-1]))
self.auth.db.wiki_media.wiki_page.default = page.id
self.auth.db.wiki_media.wiki_page.writable = False
links = []
csv = True
create = True
if current.request.vars.embedded:
script = "var c = jQuery('
fragment = self.auth.db.wiki_media.id.represent
csv = False
create = False
links= [
lambda row:
A('copy into source', _href='#', _onclick=script % (fragment(row.id, row)))
]
content = SQLFORM.grid(
self.auth.db.wiki_media.wiki_page == page.id,
orderby=self.auth.db.wiki_media.title,
links=links,
csv=csv,
create=create,
args=['_editmedia', slug],
user_signature=False)
return dict(content=content)
def create(self):
if not self.can_edit():
return self.not_authorized()
db = self.auth.db
slugs = db(db.wiki_page.id > 0).select(db.wiki_page.id, db.wiki_page.slug)
options = [OPTION(row.slug, _value=row.id) for row in slugs]
options.insert(0, OPTION('', _value=''))
fields = [Field("slug", default=current.request.args(1) or
self.settings.force_prefix,
requires=(IS_SLUG(), IS_NOT_IN_DB(db, db.wiki_page.slug))),]
if self.settings.templates:
fields.append(
Field("from_template", "reference wiki_page",
requires=IS_EMPTY_OR(
IS_IN_DB(db(self.settings.templates),
db.wiki_page._id,
'%(slug)s')),
comment=current.T(
"Choose Template or empty for new Page")))
form = SQLFORM.factory(*fields, **dict(_class="well"))
form.element("[type=submit]").attributes["_value"] = \
current.T("Create Page from Slug")
if form.process().accepted:
form.vars.from_template = 0 if not form.vars.from_template \
else form.vars.from_template
redirect(URL(args=('_edit', form.vars.slug, form.vars.from_template or 0))) # added param
return dict(content=form)
def pages(self):
if not self.can_manage():
return self.not_authorized()
self.auth.db.wiki_page.slug.represent = lambda slug, row: SPAN(
'@////%s' % slug)
self.auth.db.wiki_page.title.represent = lambda title, row: \
A(title, _href=URL(args=row.slug))
wiki_table = self.auth.db.wiki_page
content = SQLFORM.grid(
wiki_table,
fields=[wiki_table.slug,
wiki_table.title, wiki_table.tags,
wiki_table.can_read, wiki_table.can_edit],
links=[
lambda row:
A('edit', _href=URL(args=('_edit', row.slug)), _class='btn'),
lambda row:
A('media', _href=URL(args=('_editmedia', row.slug)), _class='btn')],
details=False, editable=False, deletable=False, create=False,
orderby=self.auth.db.wiki_page.title,
args=['_pages'],
user_signature=False)
return dict(content=content)
def media(self, id):
request, response, db = current.request, current.response, self.auth.db
media = db.wiki_media(id)
if media:
if self.settings.manage_permissions:
page = db.wiki_page(media.wiki_page)
if not self.can_read(page):
return self.not_authorized(page)
request.args = [media.filename]
m = response.download(request, db)
current.session.forget() # get rid of the cookie
response.headers['Last-Modified'] = \
request.utcnow.strftime("%a, %d %b %Y %H:%M:%S GMT")
if 'Content-Disposition' in response.headers:
del response.headers['Content-Disposition']
response.headers['Pragma'] = 'cache'
response.headers['Cache-Control'] = 'private'
return m
else:
raise HTTP(404)
def menu(self, controller='default', function='index'):
db = self.auth.db
request = current.request
menu_page = db.wiki_page(slug='wiki-menu')
menu = []
if menu_page:
tree = {'': menu}
            regex = re.compile(r'[\r\n\t]*(?P<base>(\s*\-\s*)+)(?P<title>\w.*?)\s+\>\s+(?P<link>\S+)')
for match in regex.finditer(self.fix_hostname(menu_page.body)):
base = match.group('base').replace(' ', '')
title = match.group('title')
link = match.group('link')
title_page = None
if link.startswith('@'):
items = link[2:].split('/')
if len(items) > 3:
title_page = items[3]
link = URL(a=items[0] or None, c=items[1] or controller,
f=items[2] or function, args=items[3:])
parent = tree.get(base[1:], tree[''])
subtree = []
tree[base] = subtree
parent.append((current.T(title),
request.args(0) == title_page,
link, subtree))
if self.can_see_menu():
submenu = []
menu.append((current.T('[Wiki]'), None, None, submenu))
if URL() == URL(controller, function):
if not str(request.args(0)).startswith('_'):
slug = request.args(0) or 'index'
mode = 1
elif request.args(0) == '_edit':
slug = request.args(1) or 'index'
mode = 2
elif request.args(0) == '_editmedia':
slug = request.args(1) or 'index'
mode = 3
else:
mode = 0
if mode in (2, 3):
submenu.append((current.T('View Page'), None,
URL(controller, function, args=slug)))
if mode in (1, 3):
submenu.append((current.T('Edit Page'), None,
URL(controller, function, args=('_edit', slug))))
if mode in (1, 2):
submenu.append((current.T('Edit Page Media'), None,
URL(controller, function, args=('_editmedia', slug))))
submenu.append((current.T('Create New Page'), None,
URL(controller, function, args=('_create'))))
# Moved next if to inside self.auth.user check
if self.can_manage():
submenu.append((current.T('Manage Pages'), None,
URL(controller, function, args=('_pages'))))
submenu.append((current.T('Edit Menu'), None,
URL(controller, function, args=('_edit', 'wiki-menu'))))
# Also moved inside self.auth.user check
submenu.append((current.T('Search Pages'), None,
URL(controller, function, args=('_search'))))
return menu
def search(self, tags=None, query=None, cloud=True, preview=True,
limitby=(0, 100), orderby=None):
if not self.can_search():
return self.not_authorized()
request = current.request
content = CAT()
if tags is None and query is None:
form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
value=request.vars.q),
INPUT(_type="submit", _value=current.T('Search')),
_method='GET')
content.append(DIV(form, _class='w2p_wiki_form'))
if request.vars.q:
tags = [v.strip() for v in request.vars.q.split(',')]
tags = [v.lower() for v in tags if v]
if tags or not query is None:
db = self.auth.db
count = db.wiki_tag.wiki_page.count()
fields = [db.wiki_page.id, db.wiki_page.slug,
db.wiki_page.title, db.wiki_page.tags,
db.wiki_page.can_read]
if preview:
fields.append(db.wiki_page.body)
if query is None:
query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
(db.wiki_tag.name.belongs(tags))
query = query | db.wiki_page.title.contains(request.vars.q)
            if self.settings.restrict_search and not self.can_manage():
query = query & (db.wiki_page.created_by == self.auth.user_id)
pages = db(query).select(count,
*fields, **dict(orderby=orderby or ~count,
groupby=reduce(lambda a, b: a | b, fields),
distinct=True,
limitby=limitby))
if request.extension in ('html', 'load'):
if not pages:
content.append(DIV(current.T("No results"),
_class='w2p_wiki_form'))
def link(t):
return A(t, _href=URL(args='_search', vars=dict(q=t)))
items = [DIV(H3(A(p.wiki_page.title, _href=URL(
args=p.wiki_page.slug))),
MARKMIN(self.first_paragraph(p.wiki_page))
if preview else '',
DIV(_class='w2p_wiki_tags',
*[link(t.strip()) for t in
p.wiki_page.tags or [] if t.strip()]),
_class='w2p_wiki_search_item')
for p in pages]
content.append(DIV(_class='w2p_wiki_pages', *items))
else:
cloud = False
content = [p.wiki_page.as_dict() for p in pages]
elif cloud:
content.append(self.cloud()['content'])
if request.extension == 'load':
return content
return dict(content=content)
def cloud(self):
db = self.auth.db
count = db.wiki_tag.wiki_page.count(distinct=True)
ids = db(db.wiki_tag).select(
db.wiki_tag.name, count,
distinct=True,
groupby=db.wiki_tag.name,
orderby=~count, limitby=(0, 20))
if ids:
a, b = ids[0](count), ids[-1](count)
def style(c):
STYLE = 'padding:0 0.2em;line-height:%.2fem;font-size:%.2fem'
size = (1.5 * (c - b) / max(a - b, 1) + 1.3)
return STYLE % (1.3, size)
items = []
for item in ids:
items.append(A(item.wiki_tag.name,
_style=style(item(count)),
_href=URL(args='_search',
vars=dict(q=item.wiki_tag.name))))
items.append(' ')
return dict(content=DIV(_class='w2p_cloud', *items))
def preview(self, render):
request = current.request
# FIXME: This is an ugly hack to ensure a default render
# engine if not specified (with multiple render engines)
if not "render" in request.post_vars:
request.post_vars.render = None
return render(request.post_vars)
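# Wiring sketch (illustrative): the usual entry point is Auth.wiki(), which
# builds a Wiki instance with the settings above and dispatches the request:
#
#   def index():
#       return auth.wiki()   # 'auth' is the Auth instance from the model
#
# Requests such as /index, /_edit/<slug>, /_search and /_cloud are then routed
# by Wiki.__call__ above.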
class Config(object):
def __init__(
self,
filename,
section,
default_values={}
):
self.config = ConfigParser.ConfigParser(default_values)
self.config.read(filename)
if not self.config.has_section(section):
self.config.add_section(section)
self.section = section
self.filename = filename
def read(self):
        if not isinstance(current.session['settings_%s' % self.section], dict):
settings = dict(self.config.items(self.section))
else:
settings = current.session['settings_%s' % self.section]
return settings
def save(self, options):
for option, value in options:
self.config.set(self.section, option, value)
        try:
            with open(self.filename, 'w') as configfile:
                self.config.write(configfile)
            result = True
        except:
            current.session['settings_%s' % self.section] = dict(self.config.items(self.section))
            result = False
return result
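# Usage sketch (illustrative; the filename and option names are assumptions):
#
#   cfg = Config('settings.ini', 'smtp', default_values={'port': '25'})
#   options = cfg.read()   # dict of options (possibly cached in the session)
#   cfg.save([('server', 'mail.example.com'), ('port', '587')])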
if __name__ == '__main__':
import doctest
doctest.testmod()
| false
| true
|
790c17c0e7dada81311cf78f892c1f870b0a57e2
| 2,139
|
py
|
Python
|
Bing Wallpaper/GetWallpaper.py
|
networkprogrammer/bing-wallpaper-for-mac
|
e0d840ae445bb17efc80f6cc3d7698c340992985
|
[
"MIT"
] | 86
|
2015-04-02T21:21:05.000Z
|
2022-01-09T18:57:49.000Z
|
Bing Wallpaper/GetWallpaper.py
|
networkprogrammer/bing-wallpaper-for-mac
|
e0d840ae445bb17efc80f6cc3d7698c340992985
|
[
"MIT"
] | 13
|
2015-04-10T11:11:00.000Z
|
2020-05-09T13:08:20.000Z
|
Bing Wallpaper/GetWallpaper.py
|
networkprogrammer/bing-wallpaper-for-mac
|
e0d840ae445bb17efc80f6cc3d7698c340992985
|
[
"MIT"
] | 20
|
2015-04-02T20:08:39.000Z
|
2022-03-02T12:11:55.000Z
|
import json
#Try with python3
try:
    from urllib.request import urlopen, urlretrieve
#Else try python2
except:
from urllib2 import urlopen
from urllib import urlretrieve
from os import path
#User home folder
homeFolder = path.expanduser("~")
#Save pictures to a folder
pictureLocation = homeFolder + "/Downloads/"
def main():
########Defining variables#######
#URL in json format for latest wallpaper
url = "http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US"
getHighRes = 1 #Manually change the resolution in the url to 1920x1200. Change to 0 if url breaks.
#Get json response from bing.com
response = urlopen(url)
#Trying python 3
try:
output = response.readall().decode('utf-8')
#Else trying python2
except:
output = response.read()
#Get json output
data = json.loads(output)
#Form image url from json
output_url = "http://www.bing.com/" + data["images"][0]["url"]
#Form 1920x1200 image from above url
output_url_highres = output_url.replace("1080", "1200")
#If higher resolution is preferred(default)
if getHighRes == 1:
#Use try block to catch any failure in getting the high res image
try:
process_url(output_url_highres)
except:
process_url(output_url)
else:
process_url(output_url)
def process_url(image_url):
if not check_url(image_url) == 1:
#Get the filename of the new file from the url
filename = pictureLocation + image_url.split('/')[-1]
#Retrieve the image from the web and save it to desired location
req = urlretrieve(image_url, filename)
#Save the file path + filename to the output variable
bingImage = path.abspath(filename)
print(bingImage)
else:
raise Exception('bad url')
def check_url(image_url):
conn = urlopen(image_url)
    if conn.getcode() != 200:
return 1
main()
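#Illustrative note (added): the JSON "url" field typically ends in a resolution
#suffix such as "..._1920x1080.jpg" (hypothetical value), so the replace of
#"1080" with "1200" above requests the 1920x1200 variant of the same image.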
| 25.164706
| 103
| 0.624123
|
import json
try:
    from urllib.request import urlopen, urlretrieve
except:
from urllib2 import urlopen
from urllib import urlretrieve
from os import path
homeFolder = path.expanduser("~")
pictureLocation = homeFolder + "/Downloads/"
def main():
output = response.read()
data = json.loads(output)
output_url = "http://www.bing.com/" + data["images"][0]["url"]
output_url_highres = output_url.replace("1080", "1200")
if getHighRes == 1:
try:
process_url(output_url_highres)
except:
process_url(output_url)
else:
process_url(output_url)
def process_url(image_url):
if not check_url(image_url) == 1:
filename = pictureLocation + image_url.split('/')[-1]
req = urlretrieve(image_url, filename)
bingImage = path.abspath(filename)
print(bingImage)
else:
raise Exception('bad url')
def check_url(image_url):
conn = urlopen(image_url)
    if conn.getcode() != 200:
return 1
main()
| true
| true
|
790c17c3fa179607dfdd445da0e23d4b45f81a5e
| 2,721
|
py
|
Python
|
libcsce/bin/csce.py
|
strozfriedberg/cobaltstrike-config-extractor
|
a2c5ed16d0b6bcbf8967f1c4caa7797acb245d1c
|
[
"Apache-2.0"
] | 74
|
2021-08-23T19:02:30.000Z
|
2022-02-10T23:04:02.000Z
|
libcsce/bin/csce.py
|
strozfriedberg/cobaltstrike-config-extractor
|
a2c5ed16d0b6bcbf8967f1c4caa7797acb245d1c
|
[
"Apache-2.0"
] | 3
|
2021-08-24T19:14:32.000Z
|
2021-09-29T21:23:17.000Z
|
libcsce/bin/csce.py
|
strozfriedberg/cobaltstrike-config-extractor
|
a2c5ed16d0b6bcbf8967f1c4caa7797acb245d1c
|
[
"Apache-2.0"
] | 6
|
2021-08-29T19:32:23.000Z
|
2022-01-24T08:09:00.000Z
|
#!/usr/bin/env python3
## Copyright 2021 Aon plc
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from argparse import ArgumentParser, Namespace
import json
import logging
from pathlib import Path
from typing import Any, Dict
from libcsce.error import CobaltStrikeError
from libcsce.parser import CobaltStrikeConfigParser
from libcsce.utils import JSONEncoderWithBinarySupport
__version__ = "0.1.0"
logger = logging.getLogger("csce")
def csce(args: Namespace):
"""Parse configuration options from Cobalt Strike Beacon."""
if not args.source.is_file():
logger.error("Source path does not exist or is not file")
return 1
if args.cs_version:
version_list = [args.cs_version]
else:
version_list = list(CobaltStrikeConfigParser.SUPPORTED_VERSIONS)
config: Dict[str, Any] = dict()
for version in version_list:
with CobaltStrikeConfigParser(args.source, version) as parser:
try:
config = parser.parse_config()
break
except CobaltStrikeError:
pass
print(
json.dumps(
config,
indent=(2 if args.pretty else None),
cls=JSONEncoderWithBinarySupport,
)
)
return 0
def gen_command_parser() -> ArgumentParser:
parser = ArgumentParser(
description="Parse Cobalt Strike beacon configuration from PE file or memory dump."
)
parser.add_argument(
"-V", "--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"--pretty", action="store_true", help="Pretty-print JSON output", dest="pretty"
)
parser.add_argument(
"-v",
"--cs-version",
type=int,
choices=CobaltStrikeConfigParser.SUPPORTED_VERSIONS,
help="Cobalt Strike version. If not specified, will try all supported versions",
dest="cs_version",
)
parser.add_argument("source", type=Path, help="Path to PE file or memory dump")
parser.set_defaults(func=csce)
return parser
def main() -> int:
parser = gen_command_parser()
args = parser.parse_args()
return args.func(args)
if __name__ == "__main__":
main()
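# Usage sketch derived from the argument parser above ('beacon.bin' is a
# hypothetical input file, and -v 4 assumes 4 is in SUPPORTED_VERSIONS):
#
#   csce.py beacon.bin                  # try every supported Cobalt Strike version
#   csce.py --pretty -v 4 beacon.bin    # force one version, pretty-print the JSON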
| 29.576087
| 91
| 0.674752
|
s_version]
else:
version_list = list(CobaltStrikeConfigParser.SUPPORTED_VERSIONS)
config: Dict[str, Any] = dict()
for version in version_list:
with CobaltStrikeConfigParser(args.source, version) as parser:
try:
config = parser.parse_config()
break
except CobaltStrikeError:
pass
print(
json.dumps(
config,
indent=(2 if args.pretty else None),
cls=JSONEncoderWithBinarySupport,
)
)
return 0
def gen_command_parser() -> ArgumentParser:
parser = ArgumentParser(
description="Parse Cobalt Strike beacon configuration from PE file or memory dump."
)
parser.add_argument(
"-V", "--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"--pretty", action="store_true", help="Pretty-print JSON output", dest="pretty"
)
parser.add_argument(
"-v",
"--cs-version",
type=int,
choices=CobaltStrikeConfigParser.SUPPORTED_VERSIONS,
help="Cobalt Strike version. If not specified, will try all supported versions",
dest="cs_version",
)
parser.add_argument("source", type=Path, help="Path to PE file or memory dump")
parser.set_defaults(func=csce)
return parser
def main() -> int:
parser = gen_command_parser()
args = parser.parse_args()
return args.func(args)
if __name__ == "__main__":
main()
| true
| true
|
790c17e6fde2cc016c39e0908d2ce033722ac1af
| 807
|
py
|
Python
|
app/topology/server.py
|
kukkalli/orchestrator
|
0b53e3f95c0a886a739cf08d611ea76c958bc691
|
[
"Apache-2.0"
] | 1
|
2022-03-02T09:43:45.000Z
|
2022-03-02T09:43:45.000Z
|
app/topology/server.py
|
kukkalli/orchestrator
|
0b53e3f95c0a886a739cf08d611ea76c958bc691
|
[
"Apache-2.0"
] | null | null | null |
app/topology/server.py
|
kukkalli/orchestrator
|
0b53e3f95c0a886a739cf08d611ea76c958bc691
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import Optional
from openstack_internal.nova.hypervisor_details import OSHypervisor
from topology.link import Link
from topology.node import Node
LOG = logging.getLogger(__name__)
class Server(Node):
def __init__(self, int_id: int, hypervisor: OSHypervisor):
super().__init__(int_id=int_id, _id=hypervisor.get_id(), name=hypervisor.get_name(), is_switch=False)
print(f"Server Name: {self.name}")
self.cpu = hypervisor.get_available_vcpus()
self.hdd = hypervisor.get_available_disk_gb()
self.ram = hypervisor.get_available_ram_mb()
        self.in_links: Optional[Link] = None
        self.out_links: Optional[Link] = None
def add_in_link(self, link: Link):
self.in_links = link
def add_out_link(self, link: Link):
self.out_links = link
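# Usage sketch (illustrative; 'hv' stands for an OSHypervisor obtained from the
# OpenStack Nova wrapper, and the Link objects are hypothetical):
#
#   server = Server(int_id=1, hypervisor=hv)
#   server.add_in_link(uplink)
#   server.add_out_link(downlink)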
| 29.888889
| 109
| 0.705081
|
import logging
from typing import Optional
from openstack_internal.nova.hypervisor_details import OSHypervisor
from topology.link import Link
from topology.node import Node
LOG = logging.getLogger(__name__)
class Server(Node):
def __init__(self, int_id: int, hypervisor: OSHypervisor):
super().__init__(int_id=int_id, _id=hypervisor.get_id(), name=hypervisor.get_name(), is_switch=False)
print(f"Server Name: {self.name}")
self.cpu = hypervisor.get_available_vcpus()
self.hdd = hypervisor.get_available_disk_gb()
self.ram = hypervisor.get_available_ram_mb()
        self.in_links: Optional[Link] = None
        self.out_links: Optional[Link] = None
def add_in_link(self, link: Link):
self.in_links = link
def add_out_link(self, link: Link):
self.out_links = link
| true
| true
|
790c1851446c0505db70f06ac267d91ce165893d
| 466
|
py
|
Python
|
tests/test_tidy_csv_to_sureal_json.py
|
PotasnikM/translator-to-suJSON
|
abb2001c78d431bd2087754666bc896ba0543dfd
|
[
"MIT"
] | null | null | null |
tests/test_tidy_csv_to_sureal_json.py
|
PotasnikM/translator-to-suJSON
|
abb2001c78d431bd2087754666bc896ba0543dfd
|
[
"MIT"
] | null | null | null |
tests/test_tidy_csv_to_sureal_json.py
|
PotasnikM/translator-to-suJSON
|
abb2001c78d431bd2087754666bc896ba0543dfd
|
[
"MIT"
] | null | null | null |
from sujson.Csv2json import Csv2json
import unittest
import filecmp
class ConvertCsvToJson(unittest.TestCase):
def setUp(self):
self.csv_to_json = Csv2json()
def test_conversion(self):
self.csv_to_json.load("files/Netflix.csv", delimiter=";")
self.csv_to_json.convert("files/Netflix_jtest.json")
self.assertTrue(filecmp.cmp("files/Netflix_jtest.json", "files/Netflix.json"))
if __name__ == '__main__':
unittest.main()
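# The test can also be run via standard unittest discovery, e.g.:
#   python -m unittest tests.test_tidy_csv_to_sureal_json
# (module path assumed from the repository layout recorded above)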
| 27.411765
| 86
| 0.712446
|
from sujson.Csv2json import Csv2json
import unittest
import filecmp
class ConvertCsvToJson(unittest.TestCase):
def setUp(self):
self.csv_to_json = Csv2json()
def test_conversion(self):
self.csv_to_json.load("files/Netflix.csv", delimiter=";")
self.csv_to_json.convert("files/Netflix_jtest.json")
self.assertTrue(filecmp.cmp("files/Netflix_jtest.json", "files/Netflix.json"))
if __name__ == '__main__':
unittest.main()
| true
| true
|
790c1a97e465dbe15e1bde0bc0ddfafb5b1719ae
| 19,017
|
py
|
Python
|
resdk/tests/functional/data_upload/e2e_upload.py
|
tristanbrown/resolwe-bio-py
|
c911defde8a5e7e902ad1adf4f9e480f17002c18
|
[
"Apache-2.0"
] | null | null | null |
resdk/tests/functional/data_upload/e2e_upload.py
|
tristanbrown/resolwe-bio-py
|
c911defde8a5e7e902ad1adf4f9e480f17002c18
|
[
"Apache-2.0"
] | null | null | null |
resdk/tests/functional/data_upload/e2e_upload.py
|
tristanbrown/resolwe-bio-py
|
c911defde8a5e7e902ad1adf4f9e480f17002c18
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=missing-docstring
# pylint: disable=unbalanced-tuple-unpacking
import os
from resdk.tests.functional.base import BaseResdkFunctionalTest
class TestUpload(BaseResdkFunctionalTest):
def get_samplesheet(self):
"""Return path of an annotation samplesheet."""
files_path = os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..',
'..',
'files',
)
)
samplesheet_name = 'annotation_spreadsheet.xlsm'
return os.path.join(files_path, samplesheet_name)
def test_annotate(self):
# Create the collection with named, unannotated samples
collection = self.res.collection.create(name='Test annotate collection')
reads_1, reads_2, reads_2b, reads_4, reads_5 = self.get_reads(5, collection)
bam = self.get_bams(1, collection)[0]
# Two different samples
sample_1 = reads_1.sample
sample_1.name = 'Sample 1'
sample_1.save()
sample_2 = reads_2.sample
sample_2.name = 'Sample 2'
sample_2.save()
# A duplicated sample
sample_2b = reads_2b.sample
sample_2b.name = 'Sample 2'
sample_2b.save()
# A sample derived from an alignment file
sample_3 = bam.sample
sample_3.name = 'Sample 3'
sample_3.save()
# Missing organism
sample_4 = reads_4.sample
sample_4.name = 'missing organism'
sample_4.save()
# Missing source
sample_5 = reads_5.sample
sample_5.name = 'missing source'
sample_5.save()
# Apply the sample annotations from a local spreadsheet
samplesheet = self.get_samplesheet()
with self.assertLogs() as logs:
collection.annotate(samplesheet)
# Check the error logging
self.assertEqual(len(logs.output), 14)
# Invalid annotations are individually logged and described
samplesheet_errprefix = "ERROR:resdk.data_upload.samplesheet:"
samplesheet_errors = [
"For the sample, '', '' is not a valid SAMPLE_NAME.",
"For the sample, 'missing annotator', '' is not a valid ANNOTATOR.",
"For the sample, 'missing organism', '' is not a valid ORGANISM.",
"For the sample, 'missing source', '' is not a valid SOURCE.",
"For the sample, 'missing molecule', '' is not a valid MOLECULE.",
"For the sample, 'missing seq_type', '' is not a valid SEQ_TYPE.",
"The sample name 'duplicated sample' is duplicated. Please use "
"unique sample names.",
]
for error in samplesheet_errors:
message = samplesheet_errprefix + error
self.assertIn(message, logs.output)
# All samples with invalid annotations are listed
invalid_samples = [
' ,',
'missing annotator',
'missing organism',
'missing source',
'missing molecule',
'missing seq_type',
'duplicated sample',
]
for invalid in invalid_samples:
self.assertIn(invalid, logs.output[7])
        # Samples not explicitly added should be missing (just check a few)
missing_samples = [
'single-reads',
'paired-reads',
'bad single path',
'bad paired path',
' ,',
'missing annotator',
'missing molecule',
'missing seq_type',
'duplicated sample',
]
for missing in missing_samples:
self.assertIn(missing, logs.output[8])
# But don't claim they're missing when they're not
present_samples = ['Sample 1', 'Sample 2', 'Sample 3',
'missing organism', 'missing source']
for present in present_samples:
self.assertNotIn(present, logs.output[8])
# Duplicate samples raise an error
duplicate_error = ("ERROR:resdk.data_upload.annotate_samples:"
"Multiple samples are queried by the name 'Sample 2'"
". Annotation will not be applied.")
self.assertIn(duplicate_error, logs.output)
# Annotations from the example sheet for Samples 1, 2, and 3
ann_1 = {
'sample': {
'genotype': 'ANIMAL 1:\xa0PBCAG-FUS1, PBCAG-eGFP, PBCAG-mCherry,'
' GLAST-PBase,\xa0PX330-P53',
'cell_type': 'Mixed',
'optional_char': [
'AGE:38 days',
'LIBRARY_STRATEGY:Illumina Standard Prep ',
'OTHER_CHAR_1:2x75 bp',
'OTHER_CHAR_2:subdural cortical tumor, frontal/lateral'
' location. Easily isolated sample',
'TISSUE:Tumor',
],
'strain': '',
'source': 'Tumor',
'organism': 'Rattus norvegicus',
'molecule': 'total RNA',
'annotator': 'Tristan Brown',
'description': '',
}
}
ann_2 = {}
# # Restore if duplicate samples may be annotated.
# ann_2 = {
# 'sample': {
# 'genotype': '',
# 'cell_type': 'Mixed',
# 'optional_char': [
# 'LIBRARY_STRATEGY:Illumina Standard Prep ',
# 'OTHER_CHAR_1:2x75 bp',
# 'OTHER_CHAR_2:subdural cortical tumor, frontal/lateral'
# ' location. Easily isolated sample',
# 'TISSUE:Tumor',
# ],
# 'strain': '',
# 'source': 'Tumor',
# 'organism': 'Homo sapiens',
# 'molecule': 'total RNA',
# 'annotator': 'Tristan Brown',
# 'description': '',
# }
# }
ann_3 = {
'sample': {
'genotype': 'AX4',
'cell_type': '',
'optional_char': [
'LIBRARY_STRATEGY:Illumina Standard Prep ',
'OTHER_CHAR_1:300 bp',
],
'strain': 'Non-aggregating',
'source': 'Cell',
'organism': 'Dictyostelium discoideum',
'molecule': 'genomic DNA',
'annotator': 'Tristan Brown',
'description': '',
}
}
reads_ann_1 = {
'experiment_type': 'RNA-Seq',
'protocols': {'antibody_information': {'manufacturer': ''},
'extract_protocol': 'Standard',
'fragmentation_method': '',
'growth_protocol': 'Standard media',
'library_prep': 'Illumina',
'treatment_protocol': 'Control'},
'reads_info': {'barcode': '', 'facility': '', 'instrument_type': ''},
}
# Check the actual annotation data
sample_1.update()
sample_2.update()
sample_2b.update()
sample_3.update()
self.assertEqual(sample_1.descriptor, ann_1)
self.assertEqual(sample_1.data[0].descriptor, reads_ann_1)
self.assertEqual(sample_1.tags, ['community:rna-seq'])
self.assertEqual(sample_2.descriptor, ann_2)
self.assertEqual(sample_2b.descriptor, ann_2)
self.assertEqual(sample_3.descriptor, ann_3)
self.assertEqual(sample_4.descriptor, {})
self.assertEqual(sample_5.descriptor, {})
def test_export(self):
# Create the collection with named, unannotated samples
collection = self.res.collection.create(name='Test export annotation')
reads_1, reads_2 = self.get_reads(2, collection)
# Two different samples
sample_1 = reads_1.sample
sample_1.name = 'Sample 1'
sample_1.save()
sample_2 = reads_2.sample
sample_2.name = 'Sample 2'
ann_2 = {
'sample': {
'genotype': '',
'cell_type': 'Mixed',
'optional_char': [
'LIBRARY_STRATEGY:Illumina Standard Prep ',
'OTHER_CHAR_1:2x75 bp',
'OTHER_CHAR_2:subdural cortical tumor, frontal/lateral'
' location. Easily isolated sample',
'TISSUE:Tumor',
],
'strain': 'N/A',
'source': 'Tumor',
'organism': 'Homo sapiens',
'molecule': 'total RNA',
'annotator': 'Tristan Brown',
'description': '',
}
}
sample_2.descriptor_schema = 'sample'
sample_2.descriptor = ann_2
sample_2.save()
reads_ann = {
'experiment_type': 'RNA-Seq',
'protocols': {
'growth_protocol': 'N/A',
'treatment_protocol': 'Control',
}
}
reads_2.descriptor_schema = 'reads'
reads_2.descriptor = reads_ann
reads_2.save()
# Export the new template
filepath = 'annotation_template1.xlsm'
try:
os.remove(filepath)
except OSError:
pass
with self.assertLogs() as logs:
collection.export_annotation(filepath)
assert os.path.exists(filepath)
# TODO: Find a robust hash check for .xls* files
os.remove(filepath)
# Check the error logging
self.assertEqual(len(logs.output), 3)
not_annotated = ("WARNING:resdk.data_upload.samplesheet:Sample 'Sample 1'"
" reads not annotated.")
self.assertIn(not_annotated, logs.output)
location = ("INFO:resdk.data_upload.annotate_samples:\nSample annotation"
" template exported to annotation_template1.xlsm.\n")
self.assertIn(location, logs.output)
def test_upload_reads(self):
# Create a collection, find the samplesheet, and upload the reads
collection = self.res.collection.create(name='Test upload reads')
samplesheet = self.get_samplesheet()
with self.assertLogs() as logs:
collection.upload_reads(samplesheet, basedir='files')
# Check the error logging
self.assertEqual(len(logs.output), 37)
upload_errprefix = "ERROR:resdk.data_upload.reads:"
# Examples of each upload error case:
upload_errs = [
"Skipping upload of 'Sample 1': No forward reads given.",
"File /storage/61_cat_R1_001.fastq.gz not found.",
"File /storage/63_cat_R1_001.fastq.gz not found.",
"Skipping upload of '01_1-1_IP_plus': Invalid file extension(s). "
"(Options: .fq, .fastq)",
"Skipping upload of 'missing barcode': Invalid file extension(s). "
"(Options: .fq, .fastq)",
"Skipping upload of 'bad extension': Invalid file extension(s). "
"(Options: .fq, .fastq)",
]
for error in upload_errs:
message = upload_errprefix + error
self.assertIn(message, logs.output)
# All samples that can't be uploaded are listed
upload_fail = [
'Sample 1',
'Sample 2',
'Sample 3',
'bad single path',
'bad paired path',
' ,',
'missing annotator',
'missing organism',
'missing source',
'missing molecule',
'missing seq_type',
'01_1-1_IP_plus',
'02_1-1_IP_minus',
'missing barcode',
'other barcode',
'01_1-1_IP_plus2',
'02_1-1_IP_minus2',
'duplicated sample',
'invalid qseq',
'invalid qseq2',
'bad extension',
]
for invalid in upload_fail:
self.assertIn(invalid, logs.output[31])
# Samples not uploaded should be missing
for missing in upload_fail:
self.assertIn(missing, logs.output[32])
# Don't claim it's invalid or missing if it was uploaded
upload_success = ['single-reads', 'paired-reads']
for uploaded in upload_success:
self.assertNotIn(uploaded, logs.output[31])
self.assertNotIn(uploaded, logs.output[32])
# Check the data objects
names = [sample.name for sample in collection.samples]
self.assertIn('single-reads', names)
self.assertIn('paired-reads', names)
# Try to duplicate the upload and fail
with self.assertLogs() as logs2:
collection.upload_reads(samplesheet, basedir='files')
already_up = [
"Skipping upload of 'single-reads': File already uploaded.",
"Skipping upload of 'paired-reads': File already uploaded.",
]
for error in already_up:
message = upload_errprefix + error
self.assertIn(message, logs2.output)
self.assertEqual(len(collection.data), 2)
self.assertEqual(len(collection.samples), 2)
# TODO: Cannot test this part because processes do not complete on Jenkins
# TODO: Check sample files and annotations in resolwe-bio when possible
# sample1 = collection.samples.get(name='single-reads')
# sample2 = collection.samples.get(name='paired-reads')
# wait_process_complete(sample1.data[0], 1, 10)
# wait_process_complete(sample2.data[0], 1, 10)
# file0 = 'reads.fastq.gz'
# file1 = 'reads_paired_abyss_1.fastq.gz'
# file2 = 'reads_paired_abyss_2.fastq.gz'
# self.assertIn(file0, sample1.files())
# self.assertIn(file1, sample2.files())
# self.assertIn(file2, sample2.files())
# self.assertEqual(sample1.descriptor['sample']['organism'], 'Mus musculus')
# self.assertEqual(sample2.descriptor['sample']['organism'], 'Rattus norvegicus')
# Test export of the annotated template
filepath = 'annotation_template2.xlsm'
try:
os.remove(filepath)
except OSError:
pass
collection.export_annotation(filepath)
assert os.path.exists(filepath)
# TODO: Find a robust hash check for .xls* files
os.remove(filepath)
def test_upload_multiplexed(self):
# Create a collection, find the samplesheet, and upload the reads
collection = self.res.collection.create(name='Test upload multiplexed')
samplesheet = self.get_samplesheet()
with self.assertLogs() as logs:
collection.upload_demulti(samplesheet, basedir='files')
# Check the error logging
self.assertEqual(len(logs.output), 39)
upload_errprefix = "ERROR:resdk.data_upload.multiplexed:"
# Examples of each upload error case:
upload_errs = [
"Skipping upload of 'reads.fastq.gz': No barcodes file given.",
"Skipping upload of 'reads_paired_abyss_1.fastq.gz': "
"No barcodes file given.",
"Skipping upload of '': No forward reads given.",
"Skipping upload of 'dummy.qseq': Missing barcode.",
"Skipping upload of 'pool24.read1.small.fastq.bz2': Invalid file "
"extension(s). (Options: .qseq)",
"Skipping upload of 'pool24c.read1.small.qseq.bz2': Invalid file "
"extension(s). (Options: .qseq)",
"Demultiplex process not yet complete for 'pool24.read1.small.qseq.bz2'.",
]
for error in upload_errs:
message = upload_errprefix + error
self.assertIn(message, logs.output)
# All samples that can't be uploaded are listed
upload_fail = [
'single-reads',
'paired-reads',
'Sample 1',
'Sample 2',
'Sample 3',
'bad single path',
'bad paired path',
' ,',
'missing annotator',
'missing organism',
'missing source',
'missing molecule',
'missing seq_type',
'missing barcode',
'other barcode',
'01_1-1_IP_plus2',
'02_1-1_IP_minus2',
'duplicated sample',
'invalid qseq',
'invalid qseq2',
'bad extension',
]
for invalid in upload_fail:
self.assertIn(invalid, logs.output[35])
# Samples not uploaded should be missing
for missing in upload_fail:
self.assertIn(missing, logs.output[36])
# Don't claim it's invalid if it was uploaded
upload_success = ['01_1-1_IP_plus,', '02_1-1_IP_minus,']
for uploaded in upload_success:
self.assertNotIn(uploaded, logs.output[35])
# Check the file is actually uploaded
names = [data.name for data in collection.data]
qseq = 'pool24.read1.small.qseq.bz2'
self.assertIn(qseq, names)
# Try to duplicate the upload and fail
with self.assertLogs() as logs2:
collection.upload_demulti(samplesheet, basedir='files')
already_up = (
upload_errprefix
+ "Skipping upload of 'pool24.read1.small.qseq.bz2': File already uploaded."
)
self.assertIn(already_up, logs2.output)
names = [data.name for data in collection.data]
names.remove(qseq)
self.assertNotIn(qseq, names)
# TODO: Cannot test this part because processes do not complete on Jenkins
# TODO: Check sample files and annotations in resolwe-bio when possible
# for data in collection.data:
# wait_process_complete(data, 1, 10) # process the .qseq upload
# collection.update()
# for data in collection.data:
# wait_process_complete(data, 1, 10) # process the demultiplexed child data
# collection.upload_demulti(samplesheet)
# collection.update()
# # Check the uploaded data and created samples
# self.assertEqual(len(collection.data), 5)
# self.assertEqual(len(collection.samples), 4)
# names = {sample.name for sample in collection.samples}
# self.assertIn('01_1-1_IP_plus', names)
# self.assertIn('02_1-1_IP_minus', names)
# sample1 = collection.samples.get(name='01_1-1_IP_plus')
# sample2 = collection.samples.get(name='02_1-1_IP_minus')
# file1 = 'pool24_01_1-1_IP_plus_TCGCAGG_mate1.fastq.gz'
# file2 = 'pool24_02_1-1_IP_minus_CTCTGCA_mate2.fastq.gz'
# self.assertIn(file1, sample1.files())
# self.assertIn(file2, sample2.files())
# self.assertEqual(sample1.descriptor['sample']['source'], 'Tumor')
# self.assertEqual(sample2.descriptor['sample']['source'], 'Control')
| 38.418182
| 89
| 0.560288
|
import os
from resdk.tests.functional.base import BaseResdkFunctionalTest
class TestUpload(BaseResdkFunctionalTest):
def get_samplesheet(self):
files_path = os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..',
'..',
'files',
)
)
samplesheet_name = 'annotation_spreadsheet.xlsm'
return os.path.join(files_path, samplesheet_name)
def test_annotate(self):
collection = self.res.collection.create(name='Test annotate collection')
reads_1, reads_2, reads_2b, reads_4, reads_5 = self.get_reads(5, collection)
bam = self.get_bams(1, collection)[0]
sample_1 = reads_1.sample
sample_1.name = 'Sample 1'
sample_1.save()
sample_2 = reads_2.sample
sample_2.name = 'Sample 2'
sample_2.save()
sample_2b = reads_2b.sample
sample_2b.name = 'Sample 2'
sample_2b.save()
sample_3 = bam.sample
sample_3.name = 'Sample 3'
sample_3.save()
sample_4 = reads_4.sample
sample_4.name = 'missing organism'
sample_4.save()
sample_5 = reads_5.sample
sample_5.name = 'missing source'
sample_5.save()
samplesheet = self.get_samplesheet()
with self.assertLogs() as logs:
collection.annotate(samplesheet)
self.assertEqual(len(logs.output), 14)
samplesheet_errprefix = "ERROR:resdk.data_upload.samplesheet:"
samplesheet_errors = [
"For the sample, '', '' is not a valid SAMPLE_NAME.",
"For the sample, 'missing annotator', '' is not a valid ANNOTATOR.",
"For the sample, 'missing organism', '' is not a valid ORGANISM.",
"For the sample, 'missing source', '' is not a valid SOURCE.",
"For the sample, 'missing molecule', '' is not a valid MOLECULE.",
"For the sample, 'missing seq_type', '' is not a valid SEQ_TYPE.",
"The sample name 'duplicated sample' is duplicated. Please use "
"unique sample names.",
]
for error in samplesheet_errors:
message = samplesheet_errprefix + error
self.assertIn(message, logs.output)
invalid_samples = [
' ,',
'missing annotator',
'missing organism',
'missing source',
'missing molecule',
'missing seq_type',
'duplicated sample',
]
for invalid in invalid_samples:
self.assertIn(invalid, logs.output[7])
missing_samples = [
'single-reads',
'paired-reads',
'bad single path',
'bad paired path',
' ,',
'missing annotator',
'missing molecule',
'missing seq_type',
'duplicated sample',
]
for missing in missing_samples:
self.assertIn(missing, logs.output[8])
present_samples = ['Sample 1', 'Sample 2', 'Sample 3',
'missing organism', 'missing source']
for present in present_samples:
self.assertNotIn(present, logs.output[8])
# Duplicate samples raise an error
duplicate_error = ("ERROR:resdk.data_upload.annotate_samples:"
"Multiple samples are queried by the name 'Sample 2'"
". Annotation will not be applied.")
self.assertIn(duplicate_error, logs.output)
# Annotations from the example sheet for Samples 1, 2, and 3
ann_1 = {
'sample': {
'genotype': 'ANIMAL 1:\xa0PBCAG-FUS1, PBCAG-eGFP, PBCAG-mCherry,'
' GLAST-PBase,\xa0PX330-P53',
'cell_type': 'Mixed',
'optional_char': [
'AGE:38 days',
'LIBRARY_STRATEGY:Illumina Standard Prep ',
'OTHER_CHAR_1:2x75 bp',
'OTHER_CHAR_2:subdural cortical tumor, frontal/lateral'
' location. Easily isolated sample',
'TISSUE:Tumor',
],
'strain': '',
'source': 'Tumor',
'organism': 'Rattus norvegicus',
'molecule': 'total RNA',
'annotator': 'Tristan Brown',
'description': '',
}
}
ann_2 = {}
# # Restore if duplicate samples may be annotated.
# ann_2 = {
# 'sample': {
# 'genotype': '',
# 'cell_type': 'Mixed',
# 'optional_char': [
# 'LIBRARY_STRATEGY:Illumina Standard Prep ',
# 'OTHER_CHAR_1:2x75 bp',
# 'OTHER_CHAR_2:subdural cortical tumor, frontal/lateral'
# ' location. Easily isolated sample',
# 'TISSUE:Tumor',
# ],
# 'strain': '',
# 'source': 'Tumor',
# 'organism': 'Homo sapiens',
# 'molecule': 'total RNA',
# 'annotator': 'Tristan Brown',
# 'description': '',
# }
# }
ann_3 = {
'sample': {
'genotype': 'AX4',
'cell_type': '',
'optional_char': [
'LIBRARY_STRATEGY:Illumina Standard Prep ',
'OTHER_CHAR_1:300 bp',
],
'strain': 'Non-aggregating',
'source': 'Cell',
'organism': 'Dictyostelium discoideum',
'molecule': 'genomic DNA',
'annotator': 'Tristan Brown',
'description': '',
}
}
reads_ann_1 = {
'experiment_type': 'RNA-Seq',
'protocols': {'antibody_information': {'manufacturer': ''},
'extract_protocol': 'Standard',
'fragmentation_method': '',
'growth_protocol': 'Standard media',
'library_prep': 'Illumina',
'treatment_protocol': 'Control'},
'reads_info': {'barcode': '', 'facility': '', 'instrument_type': ''},
}
# Check the actual annotation data
sample_1.update()
sample_2.update()
sample_2b.update()
sample_3.update()
self.assertEqual(sample_1.descriptor, ann_1)
self.assertEqual(sample_1.data[0].descriptor, reads_ann_1)
self.assertEqual(sample_1.tags, ['community:rna-seq'])
self.assertEqual(sample_2.descriptor, ann_2)
self.assertEqual(sample_2b.descriptor, ann_2)
self.assertEqual(sample_3.descriptor, ann_3)
self.assertEqual(sample_4.descriptor, {})
self.assertEqual(sample_5.descriptor, {})
def test_export(self):
# Create the collection with named, unannotated samples
collection = self.res.collection.create(name='Test export annotation')
reads_1, reads_2 = self.get_reads(2, collection)
# Two different samples
sample_1 = reads_1.sample
sample_1.name = 'Sample 1'
sample_1.save()
sample_2 = reads_2.sample
sample_2.name = 'Sample 2'
ann_2 = {
'sample': {
'genotype': '',
'cell_type': 'Mixed',
'optional_char': [
'LIBRARY_STRATEGY:Illumina Standard Prep ',
'OTHER_CHAR_1:2x75 bp',
'OTHER_CHAR_2:subdural cortical tumor, frontal/lateral'
' location. Easily isolated sample',
'TISSUE:Tumor',
],
'strain': 'N/A',
'source': 'Tumor',
'organism': 'Homo sapiens',
'molecule': 'total RNA',
'annotator': 'Tristan Brown',
'description': '',
}
}
sample_2.descriptor_schema = 'sample'
sample_2.descriptor = ann_2
sample_2.save()
reads_ann = {
'experiment_type': 'RNA-Seq',
'protocols': {
'growth_protocol': 'N/A',
'treatment_protocol': 'Control',
}
}
reads_2.descriptor_schema = 'reads'
reads_2.descriptor = reads_ann
reads_2.save()
# Export the new template
filepath = 'annotation_template1.xlsm'
try:
os.remove(filepath)
except OSError:
pass
with self.assertLogs() as logs:
collection.export_annotation(filepath)
assert os.path.exists(filepath)
# TODO: Find a robust hash check for .xls* files
os.remove(filepath)
# Check the error logging
self.assertEqual(len(logs.output), 3)
not_annotated = ("WARNING:resdk.data_upload.samplesheet:Sample 'Sample 1'"
" reads not annotated.")
self.assertIn(not_annotated, logs.output)
location = ("INFO:resdk.data_upload.annotate_samples:\nSample annotation"
" template exported to annotation_template1.xlsm.\n")
self.assertIn(location, logs.output)
def test_upload_reads(self):
# Create a collection, find the samplesheet, and upload the reads
collection = self.res.collection.create(name='Test upload reads')
samplesheet = self.get_samplesheet()
with self.assertLogs() as logs:
collection.upload_reads(samplesheet, basedir='files')
# Check the error logging
self.assertEqual(len(logs.output), 37)
upload_errprefix = "ERROR:resdk.data_upload.reads:"
# Examples of each upload error case:
upload_errs = [
"Skipping upload of 'Sample 1': No forward reads given.",
"File /storage/61_cat_R1_001.fastq.gz not found.",
"File /storage/63_cat_R1_001.fastq.gz not found.",
"Skipping upload of '01_1-1_IP_plus': Invalid file extension(s). "
"(Options: .fq, .fastq)",
"Skipping upload of 'missing barcode': Invalid file extension(s). "
"(Options: .fq, .fastq)",
"Skipping upload of 'bad extension': Invalid file extension(s). "
"(Options: .fq, .fastq)",
]
for error in upload_errs:
message = upload_errprefix + error
self.assertIn(message, logs.output)
# All samples that can't be uploaded are listed
upload_fail = [
'Sample 1',
'Sample 2',
'Sample 3',
'bad single path',
'bad paired path',
' ,',
'missing annotator',
'missing organism',
'missing source',
'missing molecule',
'missing seq_type',
'01_1-1_IP_plus',
'02_1-1_IP_minus',
'missing barcode',
'other barcode',
'01_1-1_IP_plus2',
'02_1-1_IP_minus2',
'duplicated sample',
'invalid qseq',
'invalid qseq2',
'bad extension',
]
for invalid in upload_fail:
self.assertIn(invalid, logs.output[31])
for missing in upload_fail:
self.assertIn(missing, logs.output[32])
upload_success = ['single-reads', 'paired-reads']
for uploaded in upload_success:
self.assertNotIn(uploaded, logs.output[31])
self.assertNotIn(uploaded, logs.output[32])
names = [sample.name for sample in collection.samples]
self.assertIn('single-reads', names)
self.assertIn('paired-reads', names)
with self.assertLogs() as logs2:
collection.upload_reads(samplesheet, basedir='files')
already_up = [
"Skipping upload of 'single-reads': File already uploaded.",
"Skipping upload of 'paired-reads': File already uploaded.",
]
for error in already_up:
message = upload_errprefix + error
self.assertIn(message, logs2.output)
self.assertEqual(len(collection.data), 2)
self.assertEqual(len(collection.samples), 2)
filepath = 'annotation_template2.xlsm'
try:
os.remove(filepath)
except OSError:
pass
collection.export_annotation(filepath)
assert os.path.exists(filepath)
os.remove(filepath)
def test_upload_multiplexed(self):
collection = self.res.collection.create(name='Test upload multiplexed')
samplesheet = self.get_samplesheet()
with self.assertLogs() as logs:
collection.upload_demulti(samplesheet, basedir='files')
self.assertEqual(len(logs.output), 39)
upload_errprefix = "ERROR:resdk.data_upload.multiplexed:"
upload_errs = [
"Skipping upload of 'reads.fastq.gz': No barcodes file given.",
"Skipping upload of 'reads_paired_abyss_1.fastq.gz': "
"No barcodes file given.",
"Skipping upload of '': No forward reads given.",
"Skipping upload of 'dummy.qseq': Missing barcode.",
"Skipping upload of 'pool24.read1.small.fastq.bz2': Invalid file "
"extension(s). (Options: .qseq)",
"Skipping upload of 'pool24c.read1.small.qseq.bz2': Invalid file "
"extension(s). (Options: .qseq)",
"Demultiplex process not yet complete for 'pool24.read1.small.qseq.bz2'.",
]
for error in upload_errs:
message = upload_errprefix + error
self.assertIn(message, logs.output)
upload_fail = [
'single-reads',
'paired-reads',
'Sample 1',
'Sample 2',
'Sample 3',
'bad single path',
'bad paired path',
' ,',
'missing annotator',
'missing organism',
'missing source',
'missing molecule',
'missing seq_type',
'missing barcode',
'other barcode',
'01_1-1_IP_plus2',
'02_1-1_IP_minus2',
'duplicated sample',
'invalid qseq',
'invalid qseq2',
'bad extension',
]
for invalid in upload_fail:
self.assertIn(invalid, logs.output[35])
# Samples not uploaded should be missing
for missing in upload_fail:
self.assertIn(missing, logs.output[36])
# Don't claim it's invalid if it was uploaded
upload_success = ['01_1-1_IP_plus,', '02_1-1_IP_minus,']
for uploaded in upload_success:
self.assertNotIn(uploaded, logs.output[35])
# Check the file is actually uploaded
names = [data.name for data in collection.data]
qseq = 'pool24.read1.small.qseq.bz2'
self.assertIn(qseq, names)
# Try to duplicate the upload and fail
with self.assertLogs() as logs2:
collection.upload_demulti(samplesheet, basedir='files')
already_up = (
upload_errprefix
+ "Skipping upload of 'pool24.read1.small.qseq.bz2': File already uploaded."
)
self.assertIn(already_up, logs2.output)
names = [data.name for data in collection.data]
names.remove(qseq)
self.assertNotIn(qseq, names)
# TODO: Cannot test this part because processes do not complete on Jenkins
# TODO: Check sample files and annotations in resolwe-bio when possible
# for data in collection.data:
# wait_process_complete(data, 1, 10) # process the .qseq upload
# collection.update()
# for data in collection.data:
# wait_process_complete(data, 1, 10) # process the demultiplexed child data
# collection.upload_demulti(samplesheet)
# collection.update()
# # Check the uploaded data and created samples
# self.assertEqual(len(collection.data), 5)
# self.assertEqual(len(collection.samples), 4)
# names = {sample.name for sample in collection.samples}
# self.assertIn('01_1-1_IP_plus', names)
# self.assertIn('02_1-1_IP_minus', names)
# sample1 = collection.samples.get(name='01_1-1_IP_plus')
# sample2 = collection.samples.get(name='02_1-1_IP_minus')
# file1 = 'pool24_01_1-1_IP_plus_TCGCAGG_mate1.fastq.gz'
# file2 = 'pool24_02_1-1_IP_minus_CTCTGCA_mate2.fastq.gz'
# self.assertIn(file1, sample1.files())
# self.assertIn(file2, sample2.files())
# self.assertEqual(sample1.descriptor['sample']['source'], 'Tumor')
# self.assertEqual(sample2.descriptor['sample']['source'], 'Control')
| true
| true
|
790c1beff59b54ffeb4ba62b5a503efc5c1d2929
| 7,835
|
py
|
Python
|
py/server/tests/test_kafka_consumer.py
|
lbooker42/deephaven-core
|
2d04563f18ae914754b28041475c02770e57af15
|
[
"MIT"
] | null | null | null |
py/server/tests/test_kafka_consumer.py
|
lbooker42/deephaven-core
|
2d04563f18ae914754b28041475c02770e57af15
|
[
"MIT"
] | null | null | null |
py/server/tests/test_kafka_consumer.py
|
lbooker42/deephaven-core
|
2d04563f18ae914754b28041475c02770e57af15
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
#
import os
import unittest
from deephaven import kafka_consumer as ck
from deephaven.stream.kafka.consumer import TableType, KeyValueSpec
from tests.testbase import BaseTestCase
from deephaven import dtypes
class KafkaConsumerTestCase(BaseTestCase):
def _assert_common_cols(self, cols):
self.assertEqual("KafkaPartition", cols[0].name)
self.assertEqual(dtypes.int32, cols[0].data_type)
self.assertEqual("KafkaOffset", cols[1].name)
self.assertEqual(dtypes.long, cols[1].data_type)
self.assertEqual("KafkaTimestamp", cols[2].name)
self.assertEqual(dtypes.DateTime, cols[2].data_type)
def test_basic_constants(self):
"""
Check that the basic constants are imported and visible.
"""
self.assertIsNotNone(ck.SEEK_TO_BEGINNING)
self.assertIsNotNone(ck.DONT_SEEK)
self.assertIsNotNone(ck.SEEK_TO_END)
self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_BEGINNING)
self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_END)
self.assertIsNotNone(ck.ALL_PARTITIONS_DONT_SEEK)
def test_simple_spec(self):
"""
        Check that a simple Kafka subscription creates the right table.
"""
t = ck.consume(
{'bootstrap.servers': 'redpanda:29092'},
'orders',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.simple_spec('Price', dtypes.double))
cols = t.columns
self.assertEqual(4, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Price", cols[3].name)
self.assertEqual(dtypes.double, cols[3].data_type)
def test_json_spec(self):
"""
        Check that a JSON Kafka subscription creates the right table.
"""
t = ck.consume(
{'bootstrap.servers': 'redpanda:29092'},
'orders',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.json_spec(
[('Symbol', dtypes.string),
('Side', dtypes.string),
('Price', dtypes.double),
('Qty', dtypes.int_),
('Tstamp', dtypes.DateTime)],
mapping={
'jsymbol': 'Symbol',
'jside': 'Side',
'jprice': 'Price',
'jqty': 'Qty',
'jts': 'Tstamp'
}
),
table_type=TableType.append()
)
cols = t.columns
self.assertEqual(8, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Symbol", cols[3].name)
self.assertEqual(dtypes.string, cols[3].data_type)
self.assertEqual("Side", cols[4].name)
self.assertEqual(dtypes.string, cols[4].data_type)
self.assertEqual("Price", cols[5].name)
self.assertEqual(dtypes.double, cols[5].data_type)
self.assertEqual("Qty", cols[6].name)
self.assertEqual(dtypes.int_, cols[6].data_type)
self.assertEqual("Tstamp", cols[7].name)
self.assertEqual(dtypes.DateTime, cols[7].data_type)
def test_avro_spec(self):
"""
        Check that an Avro Kafka subscription creates the right table.
"""
schema = \
"""
{ "type" : "record",
"namespace" : "io.deephaven.examples",
"name" : "share_price",
"fields" : [
{ "name" : "Symbol", "type" : "string" },
{ "name" : "Side", "type" : "string" },
{ "name" : "Qty", "type" : "int" },
{ "name" : "Price", "type" : "double" }
]
}
"""
schema_str = '{ "schema" : "%s" }' % \
schema.replace('\n', ' ').replace('"', '\\"')
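        # Wrap the Avro schema in the registry's JSON envelope, collapsing
        # newlines and escaping quotes so it embeds as a valid JSON string.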
sys_str = \
"""
curl -X POST \
-H 'Content-type: application/vnd.schemaregistry.v1+json; artifactType=AVRO' \
--data-binary '%s' \
http://redpanda:8081/subjects/share_price_record/versions
""" % schema_str
r = os.system(sys_str)
self.assertEqual(0, r)
with self.subTest(msg='straight schema, no mapping'):
t = ck.consume(
{
'bootstrap.servers': 'redpanda:29092',
'schema.registry.url': 'http://redpanda:8081'
},
'share_price',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.avro_spec('share_price_record', schema_version='1'),
table_type=TableType.append()
)
cols = t.columns
self.assertEqual(7, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Symbol", cols[3].name)
self.assertEqual(dtypes.string, cols[3].data_type)
self.assertEqual("Side", cols[4].name)
self.assertEqual(dtypes.string, cols[4].data_type)
self.assertEqual("Qty", cols[5].name)
self.assertEqual(dtypes.int32, cols[5].data_type)
self.assertEqual("Price", cols[6].name)
self.assertEqual(dtypes.double, cols[6].data_type)
with self.subTest(msg='mapping_only (filter out some schema fields)'):
m = {'Symbol': 'Ticker', 'Price': 'Dollars'}
t = ck.consume(
{
'bootstrap.servers': 'redpanda:29092',
'schema.registry.url': 'http://redpanda:8081'
},
'share_price',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.avro_spec('share_price_record', mapping=m, mapped_only=True),
table_type=TableType.append()
)
cols = t.columns
self.assertEqual(5, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Ticker", cols[3].name)
self.assertEqual(dtypes.string, cols[3].data_type)
self.assertEqual("Dollars", cols[4].name)
self.assertEqual(dtypes.double, cols[4].data_type)
with self.subTest(msg='mapping (rename some fields)'):
m = {'Symbol': 'Ticker', 'Qty': 'Quantity'}
t = ck.consume(
{
'bootstrap.servers': 'redpanda:29092',
'schema.registry.url': 'http://redpanda:8081'
},
'share_price',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.avro_spec('share_price_record', mapping=m),
table_type=TableType.append()
)
cols = t.columns
self.assertEqual(7, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Ticker", cols[3].name)
self.assertEqual(dtypes.string, cols[3].data_type)
self.assertEqual("Side", cols[4].name)
self.assertEqual(dtypes.string, cols[4].data_type)
self.assertEqual("Quantity", cols[5].name)
self.assertEqual(dtypes.int32, cols[5].data_type)
self.assertEqual("Price", cols[6].name)
self.assertEqual(dtypes.double, cols[6].data_type)
@unittest.skip("https://github.com/deephaven/deephaven-core/pull/2277")
def test_deprecated_table_types(self):
"""
Tests to make sure deprecated TableTypes are equivalent
"""
self.assertEqual(TableType.append(), TableType.Append)
self.assertEqual(TableType.stream(), TableType.Stream)
def test_table_types(self):
"""
Tests TableType construction
"""
_ = TableType.append()
_ = TableType.stream()
_ = TableType.ring(4096)
if __name__ == "__main__":
unittest.main()
| 36.105991
| 94
| 0.5515
|
import os
import unittest
from deephaven import kafka_consumer as ck
from deephaven.stream.kafka.consumer import TableType, KeyValueSpec
from tests.testbase import BaseTestCase
from deephaven import dtypes
class KafkaConsumerTestCase(BaseTestCase):
def _assert_common_cols(self, cols):
self.assertEqual("KafkaPartition", cols[0].name)
self.assertEqual(dtypes.int32, cols[0].data_type)
self.assertEqual("KafkaOffset", cols[1].name)
self.assertEqual(dtypes.long, cols[1].data_type)
self.assertEqual("KafkaTimestamp", cols[2].name)
self.assertEqual(dtypes.DateTime, cols[2].data_type)
def test_basic_constants(self):
self.assertIsNotNone(ck.SEEK_TO_BEGINNING)
self.assertIsNotNone(ck.DONT_SEEK)
self.assertIsNotNone(ck.SEEK_TO_END)
self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_BEGINNING)
self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_END)
self.assertIsNotNone(ck.ALL_PARTITIONS_DONT_SEEK)
def test_simple_spec(self):
t = ck.consume(
{'bootstrap.servers': 'redpanda:29092'},
'orders',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.simple_spec('Price', dtypes.double))
cols = t.columns
self.assertEqual(4, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Price", cols[3].name)
self.assertEqual(dtypes.double, cols[3].data_type)
def test_json_spec(self):
t = ck.consume(
{'bootstrap.servers': 'redpanda:29092'},
'orders',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.json_spec(
[('Symbol', dtypes.string),
('Side', dtypes.string),
('Price', dtypes.double),
('Qty', dtypes.int_),
('Tstamp', dtypes.DateTime)],
mapping={
'jsymbol': 'Symbol',
'jside': 'Side',
'jprice': 'Price',
'jqty': 'Qty',
'jts': 'Tstamp'
}
),
table_type=TableType.append()
)
cols = t.columns
self.assertEqual(8, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Symbol", cols[3].name)
self.assertEqual(dtypes.string, cols[3].data_type)
self.assertEqual("Side", cols[4].name)
self.assertEqual(dtypes.string, cols[4].data_type)
self.assertEqual("Price", cols[5].name)
self.assertEqual(dtypes.double, cols[5].data_type)
self.assertEqual("Qty", cols[6].name)
self.assertEqual(dtypes.int_, cols[6].data_type)
self.assertEqual("Tstamp", cols[7].name)
self.assertEqual(dtypes.DateTime, cols[7].data_type)
def test_avro_spec(self):
schema = \
"""
{ "type" : "record",
"namespace" : "io.deephaven.examples",
"name" : "share_price",
"fields" : [
{ "name" : "Symbol", "type" : "string" },
{ "name" : "Side", "type" : "string" },
{ "name" : "Qty", "type" : "int" },
{ "name" : "Price", "type" : "double" }
]
}
"""
schema_str = '{ "schema" : "%s" }' % \
schema.replace('\n', ' ').replace('"', '\\"')
sys_str = \
"""
curl -X POST \
-H 'Content-type: application/vnd.schemaregistry.v1+json; artifactType=AVRO' \
--data-binary '%s' \
http://redpanda:8081/subjects/share_price_record/versions
""" % schema_str
r = os.system(sys_str)
self.assertEqual(0, r)
with self.subTest(msg='straight schema, no mapping'):
t = ck.consume(
{
'bootstrap.servers': 'redpanda:29092',
'schema.registry.url': 'http://redpanda:8081'
},
'share_price',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.avro_spec('share_price_record', schema_version='1'),
table_type=TableType.append()
)
cols = t.columns
self.assertEqual(7, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Symbol", cols[3].name)
self.assertEqual(dtypes.string, cols[3].data_type)
self.assertEqual("Side", cols[4].name)
self.assertEqual(dtypes.string, cols[4].data_type)
self.assertEqual("Qty", cols[5].name)
self.assertEqual(dtypes.int32, cols[5].data_type)
self.assertEqual("Price", cols[6].name)
self.assertEqual(dtypes.double, cols[6].data_type)
with self.subTest(msg='mapping_only (filter out some schema fields)'):
m = {'Symbol': 'Ticker', 'Price': 'Dollars'}
t = ck.consume(
{
'bootstrap.servers': 'redpanda:29092',
'schema.registry.url': 'http://redpanda:8081'
},
'share_price',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.avro_spec('share_price_record', mapping=m, mapped_only=True),
table_type=TableType.append()
)
cols = t.columns
self.assertEqual(5, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Ticker", cols[3].name)
self.assertEqual(dtypes.string, cols[3].data_type)
self.assertEqual("Dollars", cols[4].name)
self.assertEqual(dtypes.double, cols[4].data_type)
with self.subTest(msg='mapping (rename some fields)'):
m = {'Symbol': 'Ticker', 'Qty': 'Quantity'}
t = ck.consume(
{
'bootstrap.servers': 'redpanda:29092',
'schema.registry.url': 'http://redpanda:8081'
},
'share_price',
key_spec=KeyValueSpec.IGNORE,
value_spec=ck.avro_spec('share_price_record', mapping=m),
table_type=TableType.append()
)
cols = t.columns
self.assertEqual(7, len(cols))
self._assert_common_cols(cols)
self.assertEqual("Ticker", cols[3].name)
self.assertEqual(dtypes.string, cols[3].data_type)
self.assertEqual("Side", cols[4].name)
self.assertEqual(dtypes.string, cols[4].data_type)
self.assertEqual("Quantity", cols[5].name)
self.assertEqual(dtypes.int32, cols[5].data_type)
self.assertEqual("Price", cols[6].name)
self.assertEqual(dtypes.double, cols[6].data_type)
@unittest.skip("https://github.com/deephaven/deephaven-core/pull/2277")
def test_deprecated_table_types(self):
self.assertEqual(TableType.append(), TableType.Append)
self.assertEqual(TableType.stream(), TableType.Stream)
def test_table_types(self):
_ = TableType.append()
_ = TableType.stream()
_ = TableType.ring(4096)
if __name__ == "__main__":
unittest.main()
| true
| true
|
790c1c72522e0be45defe059030eba030b43cba0
| 532
|
py
|
Python
|
diversity_filters/diversity_filter.py
|
marco-foscato/Lib-INVENT
|
fe6a65ab7165abd87b25752a6b4208c8703d11f7
|
[
"Apache-2.0"
] | 26
|
2021-04-30T23:21:17.000Z
|
2022-03-10T06:33:11.000Z
|
diversity_filters/diversity_filter.py
|
marco-foscato/Lib-INVENT
|
fe6a65ab7165abd87b25752a6b4208c8703d11f7
|
[
"Apache-2.0"
] | 6
|
2021-10-03T08:35:48.000Z
|
2022-03-24T09:57:39.000Z
|
diversity_filters/diversity_filter.py
|
marco-foscato/Lib-INVENT
|
fe6a65ab7165abd87b25752a6b4208c8703d11f7
|
[
"Apache-2.0"
] | 10
|
2021-04-28T14:08:17.000Z
|
2022-03-04T04:18:13.000Z
|
from diversity_filters import NoFilter, NoFilterWithPenalty
from diversity_filters.base_diversity_filter import BaseDiversityFilter
from diversity_filters.diversity_filter_parameters import DiversityFilterParameters
class DiversityFilter:
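    # Factory: __new__ looks up parameters.name and returns an instance of the
    # matching filter class rather than a DiversityFilter instance.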
def __new__(cls, parameters: DiversityFilterParameters) -> BaseDiversityFilter:
all_filters = dict(NoFilter=NoFilter,
NoFilterWithPenalty=NoFilterWithPenalty)
div_filter = all_filters.get(parameters.name)
return div_filter(parameters)
| 40.923077
| 83
| 0.789474
|
from diversity_filters import NoFilter, NoFilterWithPenalty
from diversity_filters.base_diversity_filter import BaseDiversityFilter
from diversity_filters.diversity_filter_parameters import DiversityFilterParameters
class DiversityFilter:
def __new__(cls, parameters: DiversityFilterParameters) -> BaseDiversityFilter:
all_filters = dict(NoFilter=NoFilter,
NoFilterWithPenalty=NoFilterWithPenalty)
div_filter = all_filters.get(parameters.name)
return div_filter(parameters)
| true
| true
|
790c1d9281014ad9759f80ce15899e74ddf496ab
| 179
|
py
|
Python
|
fixtures/python_output/post_form.py
|
martinsirbe/curlconverter
|
c5324e85d2ca24ef4743fb2bb36139d23367e293
|
[
"MIT"
] | 4,955
|
2015-01-02T09:04:20.000Z
|
2021-10-06T03:54:43.000Z
|
fixtures/python_output/post_form.py
|
martinsirbe/curlconverter
|
c5324e85d2ca24ef4743fb2bb36139d23367e293
|
[
"MIT"
] | 242
|
2015-03-27T05:59:11.000Z
|
2021-10-03T08:36:05.000Z
|
fixtures/python_output/post_form.py
|
martinsirbe/curlconverter
|
c5324e85d2ca24ef4743fb2bb36139d23367e293
|
[
"MIT"
] | 504
|
2015-01-02T16:04:36.000Z
|
2021-10-01T03:43:55.000Z
|
import requests
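# Passing (None, value) tuples via files= makes requests encode each entry as
# a plain multipart/form-data field with no filename, mimicking an HTML form.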
files = {
'username': (None, 'davidwalsh'),
'password': (None, 'something'),
}
response = requests.post('http://domain.tld/post-to-me.php', files=files)
| 19.888889
| 73
| 0.648045
|
import requests
files = {
'username': (None, 'davidwalsh'),
'password': (None, 'something'),
}
response = requests.post('http://domain.tld/post-to-me.php', files=files)
| true
| true
|
790c1f2434082d6c5ac1eb1820572375dab6fa4e
| 768
|
py
|
Python
|
catalog/bindings/wfs/allowed_values.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/wfs/allowed_values.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/wfs/allowed_values.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
from bindings.wfs.range import Range
__NAMESPACE__ = "http://www.opengis.net/ows/1.1"
@dataclass
class AllowedValues:
"""List of all the valid values and/or ranges of values for this quantity.
For numeric quantities, signed values should be ordered from
negative infinity to positive infinity.
"""
class Meta:
namespace = "http://www.opengis.net/ows/1.1"
value: List[str] = field(
default_factory=list,
metadata={
"name": "Value",
"type": "Element",
},
)
range: List[Range] = field(
default_factory=list,
metadata={
"name": "Range",
"type": "Element",
},
)
| 23.272727
| 78
| 0.598958
|
from dataclasses import dataclass, field
from typing import List
from bindings.wfs.range import Range
__NAMESPACE__ = "http://www.opengis.net/ows/1.1"
@dataclass
class AllowedValues:
class Meta:
namespace = "http://www.opengis.net/ows/1.1"
value: List[str] = field(
default_factory=list,
metadata={
"name": "Value",
"type": "Element",
},
)
range: List[Range] = field(
default_factory=list,
metadata={
"name": "Range",
"type": "Element",
},
)
| true
| true
|
790c1ffe5f8782708c5d0814db539647984e9c48
| 162
|
py
|
Python
|
fedrec/communications/messages.py
|
rharish101/RecoEdge
|
8e33da7e09cea6208ccea0887d575b9431d11c27
|
[
"Apache-2.0"
] | null | null | null |
fedrec/communications/messages.py
|
rharish101/RecoEdge
|
8e33da7e09cea6208ccea0887d575b9431d11c27
|
[
"Apache-2.0"
] | null | null | null |
fedrec/communications/messages.py
|
rharish101/RecoEdge
|
8e33da7e09cea6208ccea0887d575b9431d11c27
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
class ProcMessage(Enum):
SYNC_MODEL = 1
class JobCompletions():
SENDER_ID = 1
STATUS = True
RESULTS = {}
ERRORS = ""
| 13.5
| 24
| 0.623457
|
from enum import Enum
class ProcMessage(Enum):
SYNC_MODEL = 1
class JobCompletions():
SENDER_ID = 1
STATUS = True
RESULTS = {}
ERRORS = ""
| true
| true
|
790c2064fe63fa5e1f3040f85e72c22a184b36f6
| 463
|
py
|
Python
|
example/invoices/forms.py
|
pakondom/templated-docs
|
a52b87d55b26ed26773e3b4bfec634923c936480
|
[
"MIT"
] | 100
|
2016-07-19T12:39:55.000Z
|
2022-03-08T16:42:21.000Z
|
example/invoices/forms.py
|
pakondom/templated-docs
|
a52b87d55b26ed26773e3b4bfec634923c936480
|
[
"MIT"
] | 49
|
2018-09-05T06:56:22.000Z
|
2020-04-11T03:58:16.000Z
|
example/invoices/forms.py
|
pakondom/templated-docs
|
a52b87d55b26ed26773e3b4bfec634923c936480
|
[
"MIT"
] | 69
|
2016-07-18T09:30:25.000Z
|
2022-03-08T16:42:27.000Z
|
#--coding: utf8--
from django import forms
from invoices.models import Customer
class InvoiceForm(forms.Form):
FORMAT_CHOICES = (
('pdf', 'PDF'),
('docx', 'MS Word'),
('html', 'HTML'),
)
number = forms.CharField(label='Invoice #')
customer = forms.ModelChoiceField(queryset=Customer.objects.all())
subject = forms.CharField()
amount = forms.DecimalField()
format = forms.ChoiceField(choices=FORMAT_CHOICES)
| 24.368421
| 70
| 0.647948
|
from django import forms
from invoices.models import Customer
class InvoiceForm(forms.Form):
FORMAT_CHOICES = (
('pdf', 'PDF'),
('docx', 'MS Word'),
('html', 'HTML'),
)
number = forms.CharField(label='Invoice #')
customer = forms.ModelChoiceField(queryset=Customer.objects.all())
subject = forms.CharField()
amount = forms.DecimalField()
format = forms.ChoiceField(choices=FORMAT_CHOICES)
| true
| true
|
790c207725e1c54d9a32196cd02ceb7f9a4e7af7
| 18,376
|
py
|
Python
|
renderer/render_fmo.py
|
12564985/DeFMO
|
8ed9c2963678e2c59c7431ec8786302eea841572
|
[
"MIT"
] | 1
|
2022-03-14T12:46:38.000Z
|
2022-03-14T12:46:38.000Z
|
renderer/render_fmo.py
|
12564985/DeFMO
|
8ed9c2963678e2c59c7431ec8786302eea841572
|
[
"MIT"
] | null | null | null |
renderer/render_fmo.py
|
12564985/DeFMO
|
8ed9c2963678e2c59c7431ec8786302eea841572
|
[
"MIT"
] | null | null | null |
""" render_fmo.py renders obj file to rgb image with fmo model
Aviable function:
- clear_mash: delete all the mesh in the secene
- scene_setting_init: set scene configurations
- node_setting_init: set node configurations
- render: render rgb image for one obj file and one viewpoint
- render_obj: wrapper function for render() render
- init_all: a wrapper function, initialize all configurations
= set_image_path: reset defualt image output folder
author baiyu
modified by rozumden
"""
import sys
import os
import random
import pickle
import bpy
import glob
import numpy as np
from mathutils import Vector
from mathutils import Euler
import cv2
from PIL import Image
from skimage.draw import line_aa
from scipy import signal
from skimage.measure import regionprops
# import moviepy.editor as mpy
from array2gif import write_gif
abs_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(abs_path))
from render_helper import *
from settings import *
import settings
import pdb
def renderTraj(pars, H):
## Input: pars is either 2x2 (line) or 2x3 (parabola)
if pars.shape[1] == 2:
pars = np.concatenate( (pars, np.zeros((2,1))),1)
ns = 2
else:
ns = 5
ns = np.max([2, ns])
rangeint = np.linspace(0,1,ns)
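    # Evaluate the trajectory p(t) = pars[:,0] + pars[:,1]*t + pars[:,2]*t**2
    # at ns sample times and rasterize each segment with an anti-aliased line.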
for timeinst in range(rangeint.shape[0]-1):
ti0 = rangeint[timeinst]
ti1 = rangeint[timeinst+1]
start = pars[:,0] + pars[:,1]*ti0 + pars[:,2]*(ti0*ti0)
end = pars[:,0] + pars[:,1]*ti1 + pars[:,2]*(ti1*ti1)
start = np.round(start).astype(np.int32)
end = np.round(end).astype(np.int32)
rr, cc, val = line_aa(start[0], start[1], end[0], end[1])
valid = np.logical_and(np.logical_and(rr < H.shape[0], cc < H.shape[1]), np.logical_and(rr > 0, cc > 0))
rr = rr[valid]
cc = cc[valid]
val = val[valid]
if len(H.shape) > 2:
H[rr, cc, 0] = 0
H[rr, cc, 1] = 0
H[rr, cc, 2] = val
else:
H[rr, cc] = val
return H
def open_log(temp_folder = g_temp): # redirect output to log file
logfile = os.path.join(temp_folder,'blender_render.log')
try:
os.remove(logfile)
except OSError:
pass
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
return old
def close_log(old): # disable output redirection
os.close(1)
os.dup(old)
os.close(old)
def clear_mesh():
""" clear all meshes in the secene
"""
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH':
obj.select = True
bpy.ops.object.delete()
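    # Purge orphaned mesh/material/texture/image datablocks so repeated
    # imports do not accumulate unused data between instances.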
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
def scene_setting_init(use_gpu):
"""initialize blender setting configurations
"""
sce = bpy.context.scene.name
bpy.data.scenes[sce].render.engine = g_engine_type
bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent
#output
bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode
bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth
bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format
bpy.data.scenes[sce].render.use_overwrite = g_depth_use_overwrite
bpy.data.scenes[sce].render.use_file_extension = g_depth_use_file_extension
if g_ambient_light:
world = bpy.data.worlds['World']
world.use_nodes = True
bg = world.node_tree.nodes['Background']
bg.inputs[0].default_value[:3] = g_bg_color
bg.inputs[1].default_value = 1.0
#dimensions
bpy.data.scenes[sce].render.resolution_x = g_resolution_x
bpy.data.scenes[sce].render.resolution_y = g_resolution_y
bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage
if use_gpu:
bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
        bpy.data.scenes[sce].render.tile_y = g_hilbert_spiral
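        # Assumes device 0 is the CPU (disabled) and device 1 is the first
        # CUDA GPU (enabled); any further devices are switched off below.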
bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = False
bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True
ndev = len(bpy.context.user_preferences.addons['cycles'].preferences.devices)
print('Number of devices {}'.format(ndev))
for ki in range(2,ndev):
bpy.context.user_preferences.addons['cycles'].preferences.devices[ki].use = False
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
# bpy.types.CyclesRenderSettings.device = 'GPU'
bpy.data.scenes[sce].cycles.device = 'GPU'
def node_setting_init():
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for node in tree.nodes:
tree.nodes.remove(node)
render_layer_node = tree.nodes.new('CompositorNodeRLayers')
image_output_node = tree.nodes.new('CompositorNodeOutputFile')
image_output_node.base_path = g_syn_rgb_folder
links.new(render_layer_node.outputs[0], image_output_node.inputs[0])
# image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = g_temp
image_output_node.file_slots[0].path = 'image-######.png' # blender placeholder #
def render(obj_path, viewpoint, temp_folder):
"""render rbg image
render a object rgb image by a given camera viewpoint and
choose random image as background, only render one image
at a time.
Args:
obj_path: a string variable indicate the obj file path
viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance)
"""
vp = viewpoint
cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance)
cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt)
cam_obj = bpy.data.objects['Camera']
cam_obj.location[0] = cam_location[0]
cam_obj.location[1] = cam_location[1]
cam_obj.location[2] = cam_location[2]
cam_obj.rotation_euler[0] = cam_rot[0]
cam_obj.rotation_euler[1] = cam_rot[1]
cam_obj.rotation_euler[2] = cam_rot[2]
if not os.path.exists(g_syn_rgb_folder):
os.mkdir(g_syn_rgb_folder)
obj = bpy.data.objects['model_normalized']
ni = g_fmo_steps
maxlen = 0.5
maxrot = 1.57/6
tri = 0
# rot_base = np.array([math.pi/2,0,0])
while tri <= g_max_trials:
do_repeat = False
tri += 1
if not g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
for tempi in range(len(bpy.data.objects[oi].data.materials)):
if bpy.data.objects[oi].data.materials[tempi].alpha != 1.0:
return True, True ## transparent object
los_start = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))
loc_step = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))/ni
rot_base = np.array((random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi)))
rot_step = np.array((random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot)))/ni
old = open_log(temp_folder)
for ki in [0, ni-1]+list(range(1,ni-1)):
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.data.objects[oi].location = los_start + loc_step*ki
bpy.data.objects[oi].rotation_euler = Euler(rot_base + (rot_step*ki))
bpy.context.scene.frame_set(ki + 1)
bpy.ops.render.render(write_still=True) #start rendering
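            # On the first and last sub-frames, reject the sample if the
            # rendered object mask is empty or touches the image border.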
if ki == 0 or ki == (ni-1):
Mt = cv2.imread(os.path.join(bpy.context.scene.node_tree.nodes[1].base_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)[:,:,-1] > 0
is_border = ((Mt[0,:].sum()+Mt[-1,:].sum()+Mt[:,0].sum()+Mt[:,-1].sum()) > 0) or Mt.sum()==0
if is_border:
if ki == 0:
close_log(old)
return False, True ## sample different starting viewpoint
else:
do_repeat = True ## just sample another motion direction
if do_repeat:
break
close_log(old)
if do_repeat == False:
break
if do_repeat: ## sample different starting viewpoint
return False, True
return False, False
def make_fmo(path, gt_path, video_path):
n_im = 5
background_images = os.listdir(g_background_image_path)
seq_name = random.choice(background_images)
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.jpg"))
if len(seq_images) <= n_im:
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.png"))
seq_images.sort()
bgri = random.randint(n_im,len(seq_images)-1)
bgr_path = seq_images[bgri]
B0 = cv2.imread(bgr_path)/255
B = cv2.resize(B0, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
B[B > 1] = 1
B[B < 0] = 0
FH = np.zeros(B.shape)
MH = np.zeros(B.shape[:2])
pars = np.array([[(B.shape[0]-1)/2-1, (B.shape[1]-1)/2-1], [1.0, 1.0]]).T
FM = np.zeros(B.shape[:2]+(4,g_fmo_steps,))
centroids = np.zeros((2,g_fmo_steps))
for ki in range(g_fmo_steps):
FM[:,:,:,ki] = cv2.imread(os.path.join(gt_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)/g_rgb_color_max
props = regionprops((FM[:,:,-1,ki]>0).astype(int))
if len(props) != 1:
return False
centroids[:,ki] = props[0].centroid
for ki in range(g_fmo_steps):
F = FM[:,:,:-1,ki]*FM[:,:,-1:,ki]
M = FM[:,:,-1,ki]
if ki < g_fmo_steps-1:
pars[:,1] = centroids[:,ki+1] - centroids[:,ki]
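        # Rasterize the motion-blur kernel (PSF) for this sub-frame along the
        # line to the next centroid, normalized so all sub-frames sum to one.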
H = renderTraj(pars, np.zeros(B.shape[:2]))
H /= H.sum()*g_fmo_steps
for kk in range(3):
FH[:,:,kk] += signal.fftconvolve(H, F[:,:,kk], mode='same')
MH += signal.fftconvolve(H, M, mode='same')
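    # Composite via the FMO image formation model: I = H*F + (1 - H*M) * B,
    # where H*F and H*M have been accumulated over all sub-frames above.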
Im = FH + (1 - MH)[:,:,np.newaxis]*B
Im[Im > 1] = 1
Im[Im < 0] = 0
if g_skip_low_contrast:
Diff = np.sum(np.abs(Im - B),2)
meanval = np.mean(Diff[MH > 0.05])
print("Contrast {}".format(meanval))
if meanval < 0.2:
return False
if g_skip_small:
sizeper = np.sum(MH > 0.01)/(MH.shape[0]*MH.shape[1])
print("Size percentage {}".format(sizeper))
if sizeper < 0.05:
return False
Im = Im[:,:,[2,1,0]]
Ims = Image.fromarray((Im * 255).astype(np.uint8))
Ims.save(path)
Ball = np.zeros(B.shape+(n_im,))
Ball[:,:,:,0] = B
for ki in range(1,n_im):
bgrki_path = seq_images[bgri-ki]
Ball[:,:,:,ki] = cv2.resize(cv2.imread(bgrki_path)/255, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
Ball[Ball > 1] = 1
Ball[Ball < 0] = 0
Bmed = np.median(Ball,3)
Image.fromarray((B[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr.png'))
Image.fromarray((Bmed[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr_med.png'))
# Ims.save(os.path.join(g_temp,"I.png"))
# Image.fromarray((FH * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"FH.png"))
# Image.fromarray((MH * 255).astype(np.uint8)).save(os.path.join(g_temp,"MH.png"))
# Image.fromarray((M * 255).astype(np.uint8)).save(os.path.join(g_temp,"M.png"))
# Image.fromarray((F * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"F.png"))
# Image.fromarray((B0 * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"B.png"))
if False:
Fwr = FM[:,:,:-1,:] * FM[:,:,-1:,:] + 1 * (1 - FM[:,:,-1:,:])
Fwr = (Fwr * 255).astype(np.uint8)
# Fwr[np.repeat(FM[:,:,-1:,:]==0,3,2)]=255
out = cv2.VideoWriter(video_path,cv2.VideoWriter_fourcc(*"MJPG"), 6, (F.shape[1],F.shape[0]),True)
for ki in range(g_fmo_steps):
out.write(Fwr[:,:,:,ki])
out.release()
return True
def render_obj(obj_path, path, objid, obj_name, temp_folder):
""" render one obj file by a given viewpoint list
a wrapper function for render()
Args:
        obj_path: a string variable indicating the obj file path
"""
vps_path = random.sample(g_view_point_file, 1)[0]
vps = list(load_viewpoint(vps_path))
random.shuffle(vps)
save_path = os.path.join(path,"{}_{:04d}.png".format(obj_name,objid))
gt_path = os.path.join(path,"GT","{}_{:04d}".format(obj_name,objid))
video_path = os.path.join(path,"{}_{:04d}.avi".format(obj_name,objid))
if not os.path.exists(gt_path):
os.mkdir(gt_path)
image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = gt_path
for imt in bpy.data.images:
bpy.data.images.remove(imt)
if g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.context.scene.objects.active = bpy.data.objects[oi]
# pdb.set_trace()
# for m in bpy.data.materials:
# bpy.data.materials.remove(m)
# bpy.ops.object.material_slot_remove()
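            # Cube-project UVs in edit mode so the randomly chosen texture
            # image can be mapped onto the imported mesh.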
bpy.ops.object.editmode_toggle()
bpy.ops.uv.cube_project()
bpy.ops.object.editmode_toggle()
texture_images = os.listdir(g_texture_path)
texture = random.choice(texture_images)
tex_path = os.path.join(g_texture_path,texture)
# mat = bpy.data.materials.new(texture)
# mat.use_nodes = True
# nt = mat.node_tree
# nodes = nt.nodes
# links = nt.links
# # Image Texture
# textureNode = nodes.new("ShaderNodeTexImage")
# textureNode.image = bpy.data.images.load(tex_path)
# links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])
# mat.specular_intensity = 0
# bpy.data.objects[oi].active_material = mat
# print(bpy.data.objects[oi].active_material)
for mat in bpy.data.materials:
nodes = mat.node_tree.nodes
links = mat.node_tree.links
textureNode = nodes.new("ShaderNodeTexImage")
textureNode.image = bpy.data.images.load(tex_path)
links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])
# print(bpy.data.objects[oi].active_material)
tri = 0
while tri <= g_max_trials:
tri += 1
vp = random.sample(vps, 1)[0]
sample_different_object, sample_different_vp = render(obj_path, vp, temp_folder)
if sample_different_vp:
if sample_different_object:
print('Transparent object!')
return False
print('Rendering failed, repeating')
continue
success = make_fmo(save_path, gt_path, video_path)
if success:
return True
print('Making FMO failed, repeating')
return False
def init_all():
"""init everything we need for rendering
an image
"""
scene_setting_init(g_gpu_render_enable)
node_setting_init()
cam_obj = bpy.data.objects['Camera']
cam_obj.rotation_mode = g_rotation_mode
if g_render_light:
bpy.data.objects['Lamp'].data.energy = 50
bpy.ops.object.lamp_add(type='SUN')
bpy.data.objects['Sun'].data.energy = 5
### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA
init_all()
argv = sys.argv
argv = argv[argv.index("--") + 1:]
start_index = int(argv[0])
step_index = int(argv[1])
print('Start index {}, step index {}'.format(start_index, step_index))
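# each worker invocation processes the slice g_render_objs[start_index : start_index + step_index]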
temp_folder = g_syn_rgb_folder+g_render_objs[start_index]+'/'
for obj_name in g_render_objs[start_index:(start_index+step_index)]:
print("Processing object {}".format(obj_name))
obj_folder = os.path.join(g_syn_rgb_folder, obj_name)
if not os.path.exists(obj_folder):
os.makedirs(obj_folder)
if not os.path.exists(os.path.join(obj_folder,"GT")):
os.mkdir(os.path.join(obj_folder,"GT"))
num = g_shapenet_categlory_pair[obj_name]
search_path = os.path.join(g_shapenet_path, num, '**','*.obj')
pathes = glob.glob(search_path, recursive=True)
random.shuffle(pathes)
objid = 1
tri = 0
while objid <= g_number_per_category:
print(" instance {}".format(objid))
clear_mesh()
path = random.sample(pathes, 1)[0]
old = open_log(temp_folder)
bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl", use_split_groups=False, use_split_objects=True)
# bpy.ops.import_scene.obj(filepath=path)
close_log(old)
#combine_objects()
#scale_objects(0.5)
result = render_obj(path, obj_folder, objid, obj_name, temp_folder)
if result:
objid += 1
tri = 0
else:
print('Error! Rendering another object from the category!')
tri += 1
if tri > g_max_trials:
                print('No object found in the category!!!!!!!!!')
break
| 39.181237
| 200
| 0.619286
|
import sys
import os
import random
import pickle
import bpy
import glob
import numpy as np
from mathutils import Vector
from mathutils import Euler
import cv2
from PIL import Image
from skimage.draw import line_aa
from scipy import signal
from skimage.measure import regionprops
from array2gif import write_gif
abs_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(abs_path))
from render_helper import *
from settings import *
import settings
import pdb
def renderTraj(pars, H):
    if pars.shape[1] == 2:
        pars = np.concatenate( (pars, np.zeros((2,1))),1)
        ns = 2
    else:
        ns = 5
ns = np.max([2, ns])
rangeint = np.linspace(0,1,ns)
for timeinst in range(rangeint.shape[0]-1):
ti0 = rangeint[timeinst]
ti1 = rangeint[timeinst+1]
start = pars[:,0] + pars[:,1]*ti0 + pars[:,2]*(ti0*ti0)
end = pars[:,0] + pars[:,1]*ti1 + pars[:,2]*(ti1*ti1)
start = np.round(start).astype(np.int32)
end = np.round(end).astype(np.int32)
rr, cc, val = line_aa(start[0], start[1], end[0], end[1])
valid = np.logical_and(np.logical_and(rr < H.shape[0], cc < H.shape[1]), np.logical_and(rr > 0, cc > 0))
rr = rr[valid]
cc = cc[valid]
val = val[valid]
if len(H.shape) > 2:
H[rr, cc, 0] = 0
H[rr, cc, 1] = 0
H[rr, cc, 2] = val
else:
H[rr, cc] = val
return H
def open_log(temp_folder = g_temp):
logfile = os.path.join(temp_folder,'blender_render.log')
try:
os.remove(logfile)
except OSError:
pass
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
return old
def close_log(old):
os.close(1)
os.dup(old)
os.close(old)
def clear_mesh():
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH':
obj.select = True
bpy.ops.object.delete()
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
def scene_setting_init(use_gpu):
sce = bpy.context.scene.name
bpy.data.scenes[sce].render.engine = g_engine_type
bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent
bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode
bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth
bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format
bpy.data.scenes[sce].render.use_overwrite = g_depth_use_overwrite
bpy.data.scenes[sce].render.use_file_extension = g_depth_use_file_extension
if g_ambient_light:
world = bpy.data.worlds['World']
world.use_nodes = True
bg = world.node_tree.nodes['Background']
bg.inputs[0].default_value[:3] = g_bg_color
bg.inputs[1].default_value = 1.0
bpy.data.scenes[sce].render.resolution_x = g_resolution_x
bpy.data.scenes[sce].render.resolution_y = g_resolution_y
bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage
if use_gpu:
bpy.data.scenes[sce].render.engine = 'CYCLES'
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
        bpy.data.scenes[sce].render.tile_y = g_hilbert_spiral
bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = False
bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True
ndev = len(bpy.context.user_preferences.addons['cycles'].preferences.devices)
print('Number of devices {}'.format(ndev))
for ki in range(2,ndev):
bpy.context.user_preferences.addons['cycles'].preferences.devices[ki].use = False
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
bpy.data.scenes[sce].cycles.device = 'GPU'
def node_setting_init():
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for node in tree.nodes:
tree.nodes.remove(node)
render_layer_node = tree.nodes.new('CompositorNodeRLayers')
image_output_node = tree.nodes.new('CompositorNodeOutputFile')
image_output_node.base_path = g_syn_rgb_folder
links.new(render_layer_node.outputs[0], image_output_node.inputs[0])
image_output_node.base_path = g_temp
image_output_node.file_slots[0].path = 'image-######.png'
def render(obj_path, viewpoint, temp_folder):
vp = viewpoint
cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance)
cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt)
cam_obj = bpy.data.objects['Camera']
cam_obj.location[0] = cam_location[0]
cam_obj.location[1] = cam_location[1]
cam_obj.location[2] = cam_location[2]
cam_obj.rotation_euler[0] = cam_rot[0]
cam_obj.rotation_euler[1] = cam_rot[1]
cam_obj.rotation_euler[2] = cam_rot[2]
if not os.path.exists(g_syn_rgb_folder):
os.mkdir(g_syn_rgb_folder)
obj = bpy.data.objects['model_normalized']
ni = g_fmo_steps
maxlen = 0.5
maxrot = 1.57/6
tri = 0
while tri <= g_max_trials:
do_repeat = False
tri += 1
if not g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
for tempi in range(len(bpy.data.objects[oi].data.materials)):
if bpy.data.objects[oi].data.materials[tempi].alpha != 1.0:
                        return True, True
        los_start = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))
loc_step = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))/ni
rot_base = np.array((random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi)))
rot_step = np.array((random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot)))/ni
old = open_log(temp_folder)
for ki in [0, ni-1]+list(range(1,ni-1)):
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.data.objects[oi].location = los_start + loc_step*ki
bpy.data.objects[oi].rotation_euler = Euler(rot_base + (rot_step*ki))
bpy.context.scene.frame_set(ki + 1)
bpy.ops.render.render(write_still=True)
if ki == 0 or ki == (ni-1):
Mt = cv2.imread(os.path.join(bpy.context.scene.node_tree.nodes[1].base_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)[:,:,-1] > 0
is_border = ((Mt[0,:].sum()+Mt[-1,:].sum()+Mt[:,0].sum()+Mt[:,-1].sum()) > 0) or Mt.sum()==0
if is_border:
if ki == 0:
close_log(old)
                        return False, True
                    do_repeat = True
                    break
close_log(old)
if do_repeat == False:
break
    if do_repeat:
        return False, True
    return False, False
def make_fmo(path, gt_path, video_path):
n_im = 5
background_images = os.listdir(g_background_image_path)
seq_name = random.choice(background_images)
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.jpg"))
if len(seq_images) <= n_im:
seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.png"))
seq_images.sort()
bgri = random.randint(n_im,len(seq_images)-1)
bgr_path = seq_images[bgri]
B0 = cv2.imread(bgr_path)/255
B = cv2.resize(B0, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
B[B > 1] = 1
B[B < 0] = 0
FH = np.zeros(B.shape)
MH = np.zeros(B.shape[:2])
pars = np.array([[(B.shape[0]-1)/2-1, (B.shape[1]-1)/2-1], [1.0, 1.0]]).T
FM = np.zeros(B.shape[:2]+(4,g_fmo_steps,))
centroids = np.zeros((2,g_fmo_steps))
for ki in range(g_fmo_steps):
FM[:,:,:,ki] = cv2.imread(os.path.join(gt_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)/g_rgb_color_max
props = regionprops((FM[:,:,-1,ki]>0).astype(int))
if len(props) != 1:
return False
centroids[:,ki] = props[0].centroid
for ki in range(g_fmo_steps):
F = FM[:,:,:-1,ki]*FM[:,:,-1:,ki]
M = FM[:,:,-1,ki]
if ki < g_fmo_steps-1:
pars[:,1] = centroids[:,ki+1] - centroids[:,ki]
H = renderTraj(pars, np.zeros(B.shape[:2]))
H /= H.sum()*g_fmo_steps
for kk in range(3):
FH[:,:,kk] += signal.fftconvolve(H, F[:,:,kk], mode='same')
MH += signal.fftconvolve(H, M, mode='same')
Im = FH + (1 - MH)[:,:,np.newaxis]*B
Im[Im > 1] = 1
Im[Im < 0] = 0
if g_skip_low_contrast:
Diff = np.sum(np.abs(Im - B),2)
meanval = np.mean(Diff[MH > 0.05])
print("Contrast {}".format(meanval))
if meanval < 0.2:
return False
if g_skip_small:
sizeper = np.sum(MH > 0.01)/(MH.shape[0]*MH.shape[1])
print("Size percentage {}".format(sizeper))
if sizeper < 0.05:
return False
Im = Im[:,:,[2,1,0]]
Ims = Image.fromarray((Im * 255).astype(np.uint8))
Ims.save(path)
Ball = np.zeros(B.shape+(n_im,))
Ball[:,:,:,0] = B
for ki in range(1,n_im):
bgrki_path = seq_images[bgri-ki]
Ball[:,:,:,ki] = cv2.resize(cv2.imread(bgrki_path)/255, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)
Ball[Ball > 1] = 1
Ball[Ball < 0] = 0
Bmed = np.median(Ball,3)
Image.fromarray((B[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr.png'))
Image.fromarray((Bmed[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr_med.png'))
if False:
Fwr = FM[:,:,:-1,:] * FM[:,:,-1:,:] + 1 * (1 - FM[:,:,-1:,:])
Fwr = (Fwr * 255).astype(np.uint8)
out = cv2.VideoWriter(video_path,cv2.VideoWriter_fourcc(*"MJPG"), 6, (F.shape[1],F.shape[0]),True)
for ki in range(g_fmo_steps):
out.write(Fwr[:,:,:,ki])
out.release()
return True
def render_obj(obj_path, path, objid, obj_name, temp_folder):
vps_path = random.sample(g_view_point_file, 1)[0]
vps = list(load_viewpoint(vps_path))
random.shuffle(vps)
save_path = os.path.join(path,"{}_{:04d}.png".format(obj_name,objid))
gt_path = os.path.join(path,"GT","{}_{:04d}".format(obj_name,objid))
video_path = os.path.join(path,"{}_{:04d}.avi".format(obj_name,objid))
if not os.path.exists(gt_path):
os.mkdir(gt_path)
image_output_node = bpy.context.scene.node_tree.nodes[1]
image_output_node.base_path = gt_path
for imt in bpy.data.images:
bpy.data.images.remove(imt)
if g_apply_texture:
for oi in range(len(bpy.data.objects)):
if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':
continue
bpy.context.scene.objects.active = bpy.data.objects[oi]
bpy.ops.object.editmode_toggle()
bpy.ops.uv.cube_project()
bpy.ops.object.editmode_toggle()
texture_images = os.listdir(g_texture_path)
texture = random.choice(texture_images)
tex_path = os.path.join(g_texture_path,texture)
for mat in bpy.data.materials:
nodes = mat.node_tree.nodes
links = mat.node_tree.links
textureNode = nodes.new("ShaderNodeTexImage")
textureNode.image = bpy.data.images.load(tex_path)
links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])
tri = 0
while tri <= g_max_trials:
tri += 1
vp = random.sample(vps, 1)[0]
sample_different_object, sample_different_vp = render(obj_path, vp, temp_folder)
if sample_different_vp:
if sample_different_object:
print('Transparent object!')
return False
print('Rendering failed, repeating')
continue
success = make_fmo(save_path, gt_path, video_path)
if success:
return True
print('Making FMO failed, repeating')
return False
def init_all():
scene_setting_init(g_gpu_render_enable)
node_setting_init()
cam_obj = bpy.data.objects['Camera']
cam_obj.rotation_mode = g_rotation_mode
if g_render_light:
bpy.data.objects['Lamp'].data.energy = 50
bpy.ops.object.lamp_add(type='SUN')
bpy.data.objects['Sun'].data.energy = 5
init_all()
argv = sys.argv
argv = argv[argv.index("--") + 1:]
start_index = int(argv[0])
step_index = int(argv[1])
print('Start index {}, step index {}'.format(start_index, step_index))
temp_folder = g_syn_rgb_folder+g_render_objs[start_index]+'/'
for obj_name in g_render_objs[start_index:(start_index+step_index)]:
print("Processing object {}".format(obj_name))
obj_folder = os.path.join(g_syn_rgb_folder, obj_name)
if not os.path.exists(obj_folder):
os.makedirs(obj_folder)
if not os.path.exists(os.path.join(obj_folder,"GT")):
os.mkdir(os.path.join(obj_folder,"GT"))
num = g_shapenet_categlory_pair[obj_name]
search_path = os.path.join(g_shapenet_path, num, '**','*.obj')
pathes = glob.glob(search_path, recursive=True)
random.shuffle(pathes)
objid = 1
tri = 0
while objid <= g_number_per_category:
print(" instance {}".format(objid))
clear_mesh()
path = random.sample(pathes, 1)[0]
old = open_log(temp_folder)
bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl", use_split_groups=False, use_split_objects=True)
close_log(old)
result = render_obj(path, obj_folder, objid, obj_name, temp_folder)
if result:
objid += 1
tri = 0
else:
print('Error! Rendering another object from the category!')
tri += 1
if tri > g_max_trials:
                print('No object found in the category!!!!!!!!!')
break
| true
| true
|
790c221e22bedc50c594b18d652cc9bf37dae1c9
| 1,877
|
py
|
Python
|
train_more.py
|
We-Gold/Malaria
|
98c7fc66bcf2b9a29f88bfdab231a59fd1fc95f4
|
[
"MIT"
] | 1
|
2019-12-23T20:57:12.000Z
|
2019-12-23T20:57:12.000Z
|
train_more.py
|
We-Gold/Malaria
|
98c7fc66bcf2b9a29f88bfdab231a59fd1fc95f4
|
[
"MIT"
] | null | null | null |
train_more.py
|
We-Gold/Malaria
|
98c7fc66bcf2b9a29f88bfdab231a59fd1fc95f4
|
[
"MIT"
] | null | null | null |
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
train_datagen = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory('cell_images/train',
target_size=(100,100),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory('cell_images/test',
target_size=(100,100),
batch_size=32,
class_mode='binary')
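# resume training from the saved model; for HDF5 saves, load_model also restores the compile settings and optimizer state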
model = keras.models.load_model("model.h5")
#model.compile(optimizer = 'Adam',loss = 'binary_crossentropy',metrics = ['accuracy'])
#checkpoints = ModelCheckpoint("checkpoints/weights.{epoch:02d}.h5",
# save_weights_only = False,
# verbose = 1)
#step_size_train = train_generator.n//train_generator.batch_size
model.fit_generator(train_generator,
steps_per_epoch=8000,
epochs=5,
validation_data=validation_generator,
validation_steps=800)
#callbacks = [checkpoints])
model.save("model_2.h5")
| 39.93617
| 86
| 0.546084
|
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
train_datagen = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory('cell_images/train',
target_size=(100,100),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory('cell_images/test',
target_size=(100,100),
batch_size=32,
class_mode='binary')
model = keras.models.load_model("model.h5")
model.fit_generator(train_generator,
steps_per_epoch=8000,
epochs=5,
validation_data=validation_generator,
validation_steps=800)
model.save("model_2.h5")
| true
| true
|
790c22e9c49c2fd46e5a28dd323b0125dd895d97
| 489
|
py
|
Python
|
src/staf/migrations/0009_dataset_process.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
src/staf/migrations/0009_dataset_process.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
src/staf/migrations/0009_dataset_process.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.3 on 2018-12-08 05:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('staf', '0008_auto_20181207_1525'),
]
operations = [
migrations.AddField(
model_name='dataset',
name='process',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='staf.Process'),
),
]
| 24.45
| 123
| 0.640082
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('staf', '0008_auto_20181207_1525'),
]
operations = [
migrations.AddField(
model_name='dataset',
name='process',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='staf.Process'),
),
]
| true
| true
|
790c23939ebcb20e71804e4c46bb53d90e1d32dc
| 12,931
|
py
|
Python
|
rdflib/plugins/serializers/rdfxml.py
|
jclerman/rdflib
|
75c545e0747032a700e0bfc68dd044b86c275d03
|
[
"BSD-3-Clause"
] | 1
|
2021-05-25T12:32:26.000Z
|
2021-05-25T12:32:26.000Z
|
rdflib/plugins/serializers/rdfxml.py
|
jclerman/rdflib
|
75c545e0747032a700e0bfc68dd044b86c275d03
|
[
"BSD-3-Clause"
] | 19
|
2021-06-28T06:17:30.000Z
|
2021-11-29T06:14:43.000Z
|
rdflib/plugins/serializers/rdfxml.py
|
ajnelson-nist/rdflib
|
2011a6dd85518642e0800b2ee010a5565e16e5cc
|
[
"BSD-3-Clause"
] | 1
|
2021-05-16T08:08:56.000Z
|
2021-05-16T08:08:56.000Z
|
from typing import IO, Dict, Optional, Set
from rdflib.plugins.serializers.xmlwriter import XMLWriter
from rdflib.namespace import Namespace, RDF, RDFS # , split_uri
from rdflib.plugins.parsers.RDFVOC import RDFVOC
from rdflib.graph import Graph
from rdflib.term import Identifier, URIRef, Literal, BNode
from rdflib.util import first, more_than
from rdflib.collection import Collection
from rdflib.serializer import Serializer
from xml.sax.saxutils import quoteattr, escape
import xml.dom.minidom
from .xmlwriter import ESCAPE_ENTITIES
__all__ = ["fix", "XMLSerializer", "PrettyXMLSerializer"]
class XMLSerializer(Serializer):
def __init__(self, store: Graph):
super(XMLSerializer, self).__init__(store)
def __bindings(self):
store = self.store
nm = store.namespace_manager
bindings = {}
for predicate in set(store.predicates()):
prefix, namespace, name = nm.compute_qname_strict(predicate)
bindings[prefix] = URIRef(namespace)
RDFNS = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
if "rdf" in bindings:
assert bindings["rdf"] == RDFNS
else:
bindings["rdf"] = RDFNS
for prefix, namespace in bindings.items():
yield prefix, namespace
def serialize(
self,
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
**args,
):
# if base is given here, use that, if not and a base is set for the graph use that
if base is not None:
self.base = base
elif self.store.base is not None:
self.base = self.store.base
self.__stream = stream
self.__serialized: Dict[Identifier, int] = {}
encoding = self.encoding
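        # encode at write time; errors="replace" prevents unencodable characters from aborting serialization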
self.write = write = lambda uni: stream.write(uni.encode(encoding, "replace"))
# startDocument
write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding)
# startRDF
write("<rdf:RDF\n")
# If provided, write xml:base attribute for the RDF
if "xml_base" in args:
write(' xml:base="%s"\n' % args["xml_base"])
elif self.base:
write(' xml:base="%s"\n' % self.base)
# TODO:
# assert(
# namespaces["http://www.w3.org/1999/02/22-rdf-syntax-ns#"]=='rdf')
bindings = list(self.__bindings())
bindings.sort()
for prefix, namespace in bindings:
if prefix:
write(' xmlns:%s="%s"\n' % (prefix, namespace))
else:
write(' xmlns="%s"\n' % namespace)
write(">\n")
# write out triples by subject
for subject in self.store.subjects():
self.subject(subject, 1)
# endRDF
write("</rdf:RDF>\n")
# Set to None so that the memory can get garbage collected.
# self.__serialized = None
del self.__serialized
def subject(self, subject, depth=1):
if subject not in self.__serialized:
self.__serialized[subject] = 1
if isinstance(subject, (BNode, URIRef)):
write = self.write
indent = " " * depth
element_name = "rdf:Description"
if isinstance(subject, BNode):
write('%s<%s rdf:nodeID="%s"' % (indent, element_name, subject))
else:
uri = quoteattr(self.relativize(subject))
write("%s<%s rdf:about=%s" % (indent, element_name, uri))
if (subject, None, None) in self.store:
write(">\n")
for predicate, object in self.store.predicate_objects(subject):
self.predicate(predicate, object, depth + 1)
write("%s</%s>\n" % (indent, element_name))
else:
write("/>\n")
def predicate(self, predicate, object, depth=1):
write = self.write
indent = " " * depth
qname = self.store.namespace_manager.qname_strict(predicate)
if isinstance(object, Literal):
attributes = ""
if object.language:
attributes += ' xml:lang="%s"' % object.language
if object.datatype:
attributes += ' rdf:datatype="%s"' % object.datatype
write(
"%s<%s%s>%s</%s>\n"
% (indent, qname, attributes, escape(object, ESCAPE_ENTITIES), qname)
)
else:
if isinstance(object, BNode):
write('%s<%s rdf:nodeID="%s"/>\n' % (indent, qname, object))
else:
write(
"%s<%s rdf:resource=%s/>\n"
% (indent, qname, quoteattr(self.relativize(object)))
)
XMLLANG = "http://www.w3.org/XML/1998/namespacelang"
XMLBASE = "http://www.w3.org/XML/1998/namespacebase"
OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
# TODO:
def fix(val):
"strip off _: from nodeIDs... as they are not valid NCNames"
if val.startswith("_:"):
return val[2:]
else:
return val
class PrettyXMLSerializer(Serializer):
def __init__(self, store: Graph, max_depth=3):
super(PrettyXMLSerializer, self).__init__(store)
self.forceRDFAbout: Set[URIRef] = set()
def serialize(
self,
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
**args,
):
self.__serialized: Dict[Identifier, int] = {}
store = self.store
# if base is given here, use that, if not and a base is set for the graph use that
if base is not None:
self.base = base
elif store.base is not None:
self.base = store.base
self.max_depth = args.get("max_depth", 3)
assert self.max_depth > 0, "max_depth must be greater than 0"
self.nm = nm = store.namespace_manager
self.writer = writer = XMLWriter(stream, nm, encoding)
namespaces = {}
possible = set(store.predicates()).union(store.objects(None, RDF.type))
for predicate in possible:
prefix, namespace, local = nm.compute_qname_strict(predicate)
namespaces[prefix] = namespace
namespaces["rdf"] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
writer.push(RDFVOC.RDF)
if "xml_base" in args:
writer.attribute(XMLBASE, args["xml_base"])
elif self.base:
writer.attribute(XMLBASE, self.base)
writer.namespaces(namespaces.items())
subject: Identifier
# Write out subjects that can not be inline
for subject in store.subjects(): # type: ignore[assignment]
if (None, None, subject) in store:
if (subject, None, subject) in store:
self.subject(subject, 1)
else:
self.subject(subject, 1)
# write out anything that has not yet been reached
# write out BNodes last (to ensure they can be inlined where possible)
bnodes = set()
for subject in store.subjects(): # type: ignore[assignment]
if isinstance(subject, BNode):
bnodes.add(subject)
continue
self.subject(subject, 1)
# now serialize only those BNodes that have not been serialized yet
for bnode in bnodes:
if bnode not in self.__serialized:
                self.subject(bnode, 1)
writer.pop(RDFVOC.RDF)
stream.write("\n".encode("latin-1"))
# Set to None so that the memory can get garbage collected.
self.__serialized = None # type: ignore[assignment]
def subject(self, subject: Identifier, depth: int = 1):
store = self.store
writer = self.writer
if subject in self.forceRDFAbout:
writer.push(RDFVOC.Description)
writer.attribute(RDFVOC.about, self.relativize(subject))
writer.pop(RDFVOC.Description)
self.forceRDFAbout.remove(subject) # type: ignore[arg-type]
elif subject not in self.__serialized:
self.__serialized[subject] = 1
type = first(store.objects(subject, RDF.type))
try:
self.nm.qname(type)
except:
type = None
element = type or RDFVOC.Description
writer.push(element)
if isinstance(subject, BNode):
def subj_as_obj_more_than(ceil):
return True
# more_than(store.triples((None, None, subject)), ceil)
# here we only include BNode labels if they are referenced
# more than once (this reduces the use of redundant BNode
# identifiers)
if subj_as_obj_more_than(1):
writer.attribute(RDFVOC.nodeID, fix(subject))
else:
writer.attribute(RDFVOC.about, self.relativize(subject))
if (subject, None, None) in store:
for predicate, object in store.predicate_objects(subject):
if not (predicate == RDF.type and object == type):
self.predicate(predicate, object, depth + 1)
writer.pop(element)
elif subject in self.forceRDFAbout:
# TODO FIXME?: this looks like a duplicate of first condition
writer.push(RDFVOC.Description)
writer.attribute(RDFVOC.about, self.relativize(subject))
writer.pop(RDFVOC.Description)
self.forceRDFAbout.remove(subject) # type: ignore[arg-type]
def predicate(self, predicate, object, depth=1):
writer = self.writer
store = self.store
writer.push(predicate)
if isinstance(object, Literal):
if object.language:
writer.attribute(XMLLANG, object.language)
if object.datatype == RDF.XMLLiteral and isinstance(
object.value, xml.dom.minidom.Document
):
writer.attribute(RDFVOC.parseType, "Literal")
writer.text("")
writer.stream.write(object)
else:
if object.datatype:
writer.attribute(RDFVOC.datatype, object.datatype)
writer.text(object)
elif object in self.__serialized or not (object, None, None) in store:
if isinstance(object, BNode):
if more_than(store.triples((None, None, object)), 0):
writer.attribute(RDFVOC.nodeID, fix(object))
else:
writer.attribute(RDFVOC.resource, self.relativize(object))
else:
if first(store.objects(object, RDF.first)): # may not have type
# RDF.List
self.__serialized[object] = 1
# Warn that any assertions on object other than
# RDF.first and RDF.rest are ignored... including RDF.List
import warnings
warnings.warn(
"Assertions on %s other than RDF.first " % repr(object)
+ "and RDF.rest are ignored ... including RDF.List",
UserWarning,
stacklevel=2,
)
writer.attribute(RDFVOC.parseType, "Collection")
col = Collection(store, object)
for item in col:
if isinstance(item, URIRef):
self.forceRDFAbout.add(item)
self.subject(item)
if not isinstance(item, URIRef):
self.__serialized[item] = 1
else:
if first(
store.triples_choices(
(object, RDF.type, [OWL_NS.Class, RDFS.Class])
)
) and isinstance(object, URIRef):
writer.attribute(RDFVOC.resource, self.relativize(object))
elif depth <= self.max_depth:
self.subject(object, depth + 1)
elif isinstance(object, BNode):
if (
object not in self.__serialized
and (object, None, None) in store
and len(list(store.subjects(object=object))) == 1
):
# inline blank nodes if they haven't been serialized yet
# and are only referenced once (regardless of depth)
self.subject(object, depth + 1)
else:
writer.attribute(RDFVOC.nodeID, fix(object))
else:
writer.attribute(RDFVOC.resource, self.relativize(object))
writer.pop(predicate)
| 34.760753
| 90
| 0.551079
|
from typing import IO, Dict, Optional, Set
from rdflib.plugins.serializers.xmlwriter import XMLWriter
from rdflib.namespace import Namespace, RDF, RDFS
from rdflib.plugins.parsers.RDFVOC import RDFVOC
from rdflib.graph import Graph
from rdflib.term import Identifier, URIRef, Literal, BNode
from rdflib.util import first, more_than
from rdflib.collection import Collection
from rdflib.serializer import Serializer
from xml.sax.saxutils import quoteattr, escape
import xml.dom.minidom
from .xmlwriter import ESCAPE_ENTITIES
__all__ = ["fix", "XMLSerializer", "PrettyXMLSerializer"]
class XMLSerializer(Serializer):
def __init__(self, store: Graph):
super(XMLSerializer, self).__init__(store)
def __bindings(self):
store = self.store
nm = store.namespace_manager
bindings = {}
for predicate in set(store.predicates()):
prefix, namespace, name = nm.compute_qname_strict(predicate)
bindings[prefix] = URIRef(namespace)
RDFNS = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
if "rdf" in bindings:
assert bindings["rdf"] == RDFNS
else:
bindings["rdf"] = RDFNS
for prefix, namespace in bindings.items():
yield prefix, namespace
def serialize(
self,
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
**args,
):
if base is not None:
self.base = base
elif self.store.base is not None:
self.base = self.store.base
self.__stream = stream
self.__serialized: Dict[Identifier, int] = {}
encoding = self.encoding
self.write = write = lambda uni: stream.write(uni.encode(encoding, "replace"))
write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding)
write("<rdf:RDF\n")
if "xml_base" in args:
write(' xml:base="%s"\n' % args["xml_base"])
elif self.base:
write(' xml:base="%s"\n' % self.base)
bindings = list(self.__bindings())
bindings.sort()
for prefix, namespace in bindings:
if prefix:
write(' xmlns:%s="%s"\n' % (prefix, namespace))
else:
write(' xmlns="%s"\n' % namespace)
write(">\n")
for subject in self.store.subjects():
self.subject(subject, 1)
write("</rdf:RDF>\n")
del self.__serialized
def subject(self, subject, depth=1):
if subject not in self.__serialized:
self.__serialized[subject] = 1
if isinstance(subject, (BNode, URIRef)):
write = self.write
indent = " " * depth
element_name = "rdf:Description"
if isinstance(subject, BNode):
write('%s<%s rdf:nodeID="%s"' % (indent, element_name, subject))
else:
uri = quoteattr(self.relativize(subject))
write("%s<%s rdf:about=%s" % (indent, element_name, uri))
if (subject, None, None) in self.store:
write(">\n")
for predicate, object in self.store.predicate_objects(subject):
self.predicate(predicate, object, depth + 1)
write("%s</%s>\n" % (indent, element_name))
else:
write("/>\n")
def predicate(self, predicate, object, depth=1):
write = self.write
indent = " " * depth
qname = self.store.namespace_manager.qname_strict(predicate)
if isinstance(object, Literal):
attributes = ""
if object.language:
attributes += ' xml:lang="%s"' % object.language
if object.datatype:
attributes += ' rdf:datatype="%s"' % object.datatype
write(
"%s<%s%s>%s</%s>\n"
% (indent, qname, attributes, escape(object, ESCAPE_ENTITIES), qname)
)
else:
if isinstance(object, BNode):
write('%s<%s rdf:nodeID="%s"/>\n' % (indent, qname, object))
else:
write(
"%s<%s rdf:resource=%s/>\n"
% (indent, qname, quoteattr(self.relativize(object)))
)
XMLLANG = "http://www.w3.org/XML/1998/namespacelang"
XMLBASE = "http://www.w3.org/XML/1998/namespacebase"
OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
def fix(val):
if val.startswith("_:"):
return val[2:]
else:
return val
class PrettyXMLSerializer(Serializer):
def __init__(self, store: Graph, max_depth=3):
super(PrettyXMLSerializer, self).__init__(store)
self.forceRDFAbout: Set[URIRef] = set()
def serialize(
self,
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
**args,
):
self.__serialized: Dict[Identifier, int] = {}
store = self.store
if base is not None:
self.base = base
elif store.base is not None:
self.base = store.base
self.max_depth = args.get("max_depth", 3)
assert self.max_depth > 0, "max_depth must be greater than 0"
self.nm = nm = store.namespace_manager
self.writer = writer = XMLWriter(stream, nm, encoding)
namespaces = {}
possible = set(store.predicates()).union(store.objects(None, RDF.type))
for predicate in possible:
prefix, namespace, local = nm.compute_qname_strict(predicate)
namespaces[prefix] = namespace
namespaces["rdf"] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
writer.push(RDFVOC.RDF)
if "xml_base" in args:
writer.attribute(XMLBASE, args["xml_base"])
elif self.base:
writer.attribute(XMLBASE, self.base)
writer.namespaces(namespaces.items())
subject: Identifier
for subject in store.subjects():
if (None, None, subject) in store:
if (subject, None, subject) in store:
self.subject(subject, 1)
else:
self.subject(subject, 1)
bnodes = set()
for subject in store.subjects():
if isinstance(subject, BNode):
bnodes.add(subject)
continue
self.subject(subject, 1)
for bnode in bnodes:
if bnode not in self.__serialized:
                self.subject(bnode, 1)
writer.pop(RDFVOC.RDF)
stream.write("\n".encode("latin-1"))
self.__serialized = None
def subject(self, subject: Identifier, depth: int = 1):
store = self.store
writer = self.writer
if subject in self.forceRDFAbout:
writer.push(RDFVOC.Description)
writer.attribute(RDFVOC.about, self.relativize(subject))
writer.pop(RDFVOC.Description)
self.forceRDFAbout.remove(subject)
elif subject not in self.__serialized:
self.__serialized[subject] = 1
type = first(store.objects(subject, RDF.type))
try:
self.nm.qname(type)
except:
type = None
element = type or RDFVOC.Description
writer.push(element)
if isinstance(subject, BNode):
def subj_as_obj_more_than(ceil):
return True
if subj_as_obj_more_than(1):
writer.attribute(RDFVOC.nodeID, fix(subject))
else:
writer.attribute(RDFVOC.about, self.relativize(subject))
if (subject, None, None) in store:
for predicate, object in store.predicate_objects(subject):
if not (predicate == RDF.type and object == type):
self.predicate(predicate, object, depth + 1)
writer.pop(element)
elif subject in self.forceRDFAbout:
writer.push(RDFVOC.Description)
writer.attribute(RDFVOC.about, self.relativize(subject))
writer.pop(RDFVOC.Description)
self.forceRDFAbout.remove(subject)
def predicate(self, predicate, object, depth=1):
writer = self.writer
store = self.store
writer.push(predicate)
if isinstance(object, Literal):
if object.language:
writer.attribute(XMLLANG, object.language)
if object.datatype == RDF.XMLLiteral and isinstance(
object.value, xml.dom.minidom.Document
):
writer.attribute(RDFVOC.parseType, "Literal")
writer.text("")
writer.stream.write(object)
else:
if object.datatype:
writer.attribute(RDFVOC.datatype, object.datatype)
writer.text(object)
elif object in self.__serialized or not (object, None, None) in store:
if isinstance(object, BNode):
if more_than(store.triples((None, None, object)), 0):
writer.attribute(RDFVOC.nodeID, fix(object))
else:
writer.attribute(RDFVOC.resource, self.relativize(object))
else:
if first(store.objects(object, RDF.first)):
self.__serialized[object] = 1
import warnings
warnings.warn(
"Assertions on %s other than RDF.first " % repr(object)
+ "and RDF.rest are ignored ... including RDF.List",
UserWarning,
stacklevel=2,
)
writer.attribute(RDFVOC.parseType, "Collection")
col = Collection(store, object)
for item in col:
if isinstance(item, URIRef):
self.forceRDFAbout.add(item)
self.subject(item)
if not isinstance(item, URIRef):
self.__serialized[item] = 1
else:
if first(
store.triples_choices(
(object, RDF.type, [OWL_NS.Class, RDFS.Class])
)
) and isinstance(object, URIRef):
writer.attribute(RDFVOC.resource, self.relativize(object))
elif depth <= self.max_depth:
self.subject(object, depth + 1)
elif isinstance(object, BNode):
if (
object not in self.__serialized
and (object, None, None) in store
and len(list(store.subjects(object=object))) == 1
):
self.subject(object, depth + 1)
else:
writer.attribute(RDFVOC.nodeID, fix(object))
else:
writer.attribute(RDFVOC.resource, self.relativize(object))
writer.pop(predicate)
| true
| true
|
790c2497814d48278330cf0b6b21cb81bcfa4193
| 13,916
|
py
|
Python
|
tests/storage_test.py
|
phanirajl/cassandra-medusa
|
04315068365fc372b6a26d8b0ed6d2b135db1d98
|
[
"Apache-2.0"
] | null | null | null |
tests/storage_test.py
|
phanirajl/cassandra-medusa
|
04315068365fc372b6a26d8b0ed6d2b135db1d98
|
[
"Apache-2.0"
] | null | null | null |
tests/storage_test.py
|
phanirajl/cassandra-medusa
|
04315068365fc372b6a26d8b0ed6d2b135db1d98
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import configparser
import datetime
import hashlib
import os
import shutil
import tempfile
import unittest
import medusa.storage.abstract_storage
from medusa.backup import generate_md5_hash
from medusa.config import MedusaConfig, StorageConfig, _namedtuple_from_dict, CassandraConfig
from medusa.index import build_indices
from medusa.storage import Storage
class RestoreNodeTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.local_storage_dir = "/tmp/medusa_local_storage"
self.medusa_bucket_dir = "/tmp/medusa_test_bucket"
def setUp(self):
if os.path.isdir(self.local_storage_dir):
shutil.rmtree(self.local_storage_dir)
if os.path.isdir(self.medusa_bucket_dir):
shutil.rmtree(self.medusa_bucket_dir)
os.makedirs(self.local_storage_dir)
config = configparser.ConfigParser(interpolation=None)
config['storage'] = {
'host_file_separator': ',',
'bucket_name': 'medusa_test_bucket',
'key_file': '',
'storage_provider': 'local',
'prefix': '',
'fqdn': '127.0.0.1',
'api_key_or_username': '',
'api_secret_or_password': '',
'base_path': '/tmp'
}
config['cassandra'] = {
'is_ccm': 1
}
self.config = MedusaConfig(
storage=_namedtuple_from_dict(StorageConfig, config['storage']),
cassandra=_namedtuple_from_dict(CassandraConfig, config['cassandra']),
monitoring={},
ssh=None,
restore=None
)
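        # with the 'local' provider, blobs live on disk under base_path/bucket_name (here /tmp/medusa_test_bucket)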
self.storage = Storage(config=self.config.storage)
def test_add_object_from_string(self):
file_content = "content of the test file"
self.storage.storage_driver.upload_blob_from_string("test1/file.txt", file_content)
self.assertEqual(self.storage.storage_driver.get_blob_content_as_string("test1/file.txt"), file_content)
def test_download_blobs(self):
files_to_download = list()
file1_content = "content of the test file1"
file2_content = "content of the test file2"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
files_to_download.append("test_download_blobs1/file1.txt")
self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
files_to_download.append("test_download_blobs2/file2.txt")
self.assertEqual(len(os.listdir(self.medusa_bucket_dir)), 2)
self.storage.storage_driver.download_blobs(files_to_download, self.local_storage_dir)
self.assertEqual(len(os.listdir(self.local_storage_dir)), 2)
def test_list_objects(self):
file1_content = "content of the test file1"
file2_content = "content of the test file2"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
objects = self.storage.storage_driver.list_objects()
self.assertEqual(len(objects), 2)
one_object = self.storage.storage_driver.list_objects("test_download_blobs2")
self.assertEqual(len(one_object), 1)
def test_read_blob(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
objects = self.storage.storage_driver.list_objects("test_download_blobs1")
object_content = self.storage.storage_driver.read_blob_as_string(objects[0])
self.assertEqual(object_content, file1_content)
def test_get_blob(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
self.assertEqual(obj.name, "test_download_blobs1/file1.txt")
def test_read_blob_as_bytes(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
object_content = self.storage.storage_driver.get_blob_content_as_bytes("test_download_blobs1/file1.txt")
self.assertEqual(object_content, b"content of the test file1")
def test_verify_hash(self):
file1_content = "content of the test file1"
manifest = self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
self.assertEqual(manifest.MD5, obj.hash)
def test_hashes_match(self):
# Should match
hash1 = "S1EAM/BVMqhbJnAUs/nWlQ=="
hash2 = "4b510033f05532a85b267014b3f9d695"
self.assertTrue(
medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
)
# Should match
hash1 = "4b510033f05532a85b267014b3f9d695"
hash2 = "4b510033f05532a85b267014b3f9d695"
self.assertTrue(
medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
)
# Should not match
hash1 = "S1EAM/BVMqhbJnAUs/nWlQsdfsdf=="
hash2 = "4b510033f05532a85b267014b3f9d695"
self.assertFalse(
medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
)
def test_generate_md5_hash(self):
with tempfile.NamedTemporaryFile() as tf:
# write random bytes
two_megabytes = 2 * 1024 * 1024
tf.write(os.urandom(two_megabytes))
tf.flush()
# compute checksum of the whole file at once
tf.seek(0)
checksum_full = hashlib.md5(tf.read()).digest()
digest_full = base64.encodestring(checksum_full).decode('UTF-8').strip()
# compute checksum using default-size chunks
tf.seek(0)
digest_chunk = generate_md5_hash(tf.name)
# compare the digests
self.assertEqual(digest_chunk, digest_full)
# compute checksum using custom size chunks
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=128))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=256))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=1024))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=100000000)) # 100M
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=-1))
tf.seek(0)
self.assertNotEqual(digest_full, generate_md5_hash(tf.name, block_size=0))
def test_get_object_datetime(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
self.assertEqual(
datetime.datetime.fromtimestamp(int(obj.extra["modify_time"])),
self.storage.storage_driver.get_object_datetime(obj)
)
def test_get_fqdn_from_backup_index_blob(self):
blob_name = "index/backup_index/2019051307/manifest_node1.whatever.com.json"
self.assertEqual(
"node1.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/backup_index/2019051307/schema_node2.whatever.com.cql"
self.assertEqual(
"node2.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/backup_index/2019051307/schema_node3.whatever.com.txt"
self.assertEqual(
"node3.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/backup_index/2019051307/schema_node_with_underscores.whatever.com.txt"
self.assertEqual(
"node_with_underscores.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
def test_get_fqdn_from_any_index_blob(self):
blob_name = "tokenmap_hostname-with-dashes-and-3-numbers.json"
self.assertEqual(
"hostname-with-dashes-and-3-numbers",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "tokenmap_hostname-with-dashes.and-dots.json"
self.assertEqual(
"hostname-with-dashes.and-dots",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "tokenmap_hostname_with-underscores.and-dots-and.dashes.json"
self.assertEqual(
"hostname_with-underscores.and-dots-and.dashes",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/bi/third_backup/finished_localhost_1574343029.timestamp"
self.assertEqual(
"localhost",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
def test_parse_backup_index(self):
file_content = "content of the test file"
# SSTables for node1 and backup1
self.storage.storage_driver.upload_blob_from_string("node1/backup1/data/ks1/sstable1.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/data/ks1/sstable2.db", file_content)
# Metadata for node1 and backup1
self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/tokenmap.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/manifest.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/schema.cql", file_content)
# SSTables for node2 and backup1
self.storage.storage_driver.upload_blob_from_string("node2/backup1/data/ks1/sstable1.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/data/ks1/sstable2.db", file_content)
# Metadata for node2 and backup1
self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/tokenmap.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/manifest.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/schema.cql", file_content)
# SSTables for node1 and backup2
self.storage.storage_driver.upload_blob_from_string("node1/backup2/data/ks1/sstable1.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/data/ks1/sstable2.db", file_content)
# Metadata for node1 and backup2
self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/tokenmap.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/manifest.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/schema.cql", file_content)
build_indices(self.config, False)
path = 'index/backup_index'
backup_index = self.storage.storage_driver.list_objects(path)
blobs_by_backup = self.storage.group_backup_index_by_backup_and_node(backup_index)
self.assertTrue("backup1" in blobs_by_backup)
self.assertTrue("backup2" in blobs_by_backup)
self.assertTrue("node1" in blobs_by_backup["backup1"])
self.assertTrue("node2" in blobs_by_backup["backup1"])
self.assertTrue("node1" in blobs_by_backup["backup2"])
self.assertFalse("node2" in blobs_by_backup["backup2"])
def test_remove_extension(self):
self.assertEqual(
'localhost',
self.storage.remove_extension('localhost.txt')
)
self.assertEqual(
'localhost',
self.storage.remove_extension('localhost.timestamp')
)
self.assertEqual(
'localhost',
self.storage.remove_extension('localhost.cql')
)
self.assertEqual(
'localhost.foo',
self.storage.remove_extension('localhost.foo')
)
def test_get_timestamp_from_blob_name(self):
self.assertEqual(
1558021519,
self.storage.get_timestamp_from_blob_name('finished_localhost_1558021519.timestamp')
)
self.assertEqual(
1558021519,
self.storage.get_timestamp_from_blob_name('finished_some.host.net_1558021519.timestamp')
)
self.assertEqual(
1558021519,
self.storage.get_timestamp_from_blob_name('finished_some_underscores.host.net_1558021519.timestamp')
)
self.assertEqual(
1574343029,
self.storage.get_timestamp_from_blob_name('index/bi/third_backup/finished_localhost_1574343029.timestamp')
)
if __name__ == '__main__':
unittest.main()
| 45.035599
| 119
| 0.69359
|
import base64
import configparser
import datetime
import hashlib
import os
import shutil
import tempfile
import unittest
import medusa.storage.abstract_storage
from medusa.backup import generate_md5_hash
from medusa.config import MedusaConfig, StorageConfig, _namedtuple_from_dict, CassandraConfig
from medusa.index import build_indices
from medusa.storage import Storage
class RestoreNodeTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.local_storage_dir = "/tmp/medusa_local_storage"
self.medusa_bucket_dir = "/tmp/medusa_test_bucket"
def setUp(self):
if os.path.isdir(self.local_storage_dir):
shutil.rmtree(self.local_storage_dir)
if os.path.isdir(self.medusa_bucket_dir):
shutil.rmtree(self.medusa_bucket_dir)
os.makedirs(self.local_storage_dir)
config = configparser.ConfigParser(interpolation=None)
config['storage'] = {
'host_file_separator': ',',
'bucket_name': 'medusa_test_bucket',
'key_file': '',
'storage_provider': 'local',
'prefix': '',
'fqdn': '127.0.0.1',
'api_key_or_username': '',
'api_secret_or_password': '',
'base_path': '/tmp'
}
config['cassandra'] = {
'is_ccm': 1
}
self.config = MedusaConfig(
storage=_namedtuple_from_dict(StorageConfig, config['storage']),
cassandra=_namedtuple_from_dict(CassandraConfig, config['cassandra']),
monitoring={},
ssh=None,
restore=None
)
self.storage = Storage(config=self.config.storage)
def test_add_object_from_string(self):
file_content = "content of the test file"
self.storage.storage_driver.upload_blob_from_string("test1/file.txt", file_content)
self.assertEqual(self.storage.storage_driver.get_blob_content_as_string("test1/file.txt"), file_content)
def test_download_blobs(self):
files_to_download = list()
file1_content = "content of the test file1"
file2_content = "content of the test file2"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
files_to_download.append("test_download_blobs1/file1.txt")
self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
files_to_download.append("test_download_blobs2/file2.txt")
self.assertEqual(len(os.listdir(self.medusa_bucket_dir)), 2)
self.storage.storage_driver.download_blobs(files_to_download, self.local_storage_dir)
self.assertEqual(len(os.listdir(self.local_storage_dir)), 2)
def test_list_objects(self):
file1_content = "content of the test file1"
file2_content = "content of the test file2"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
objects = self.storage.storage_driver.list_objects()
self.assertEqual(len(objects), 2)
one_object = self.storage.storage_driver.list_objects("test_download_blobs2")
self.assertEqual(len(one_object), 1)
def test_read_blob(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
objects = self.storage.storage_driver.list_objects("test_download_blobs1")
object_content = self.storage.storage_driver.read_blob_as_string(objects[0])
self.assertEqual(object_content, file1_content)
def test_get_blob(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
self.assertEqual(obj.name, "test_download_blobs1/file1.txt")
def test_read_blob_as_bytes(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
object_content = self.storage.storage_driver.get_blob_content_as_bytes("test_download_blobs1/file1.txt")
self.assertEqual(object_content, b"content of the test file1")
def test_verify_hash(self):
file1_content = "content of the test file1"
manifest = self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
self.assertEqual(manifest.MD5, obj.hash)
def test_hashes_match(self):
hash1 = "S1EAM/BVMqhbJnAUs/nWlQ=="
hash2 = "4b510033f05532a85b267014b3f9d695"
self.assertTrue(
medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
)
hash1 = "4b510033f05532a85b267014b3f9d695"
hash2 = "4b510033f05532a85b267014b3f9d695"
self.assertTrue(
medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
)
hash1 = "S1EAM/BVMqhbJnAUs/nWlQsdfsdf=="
hash2 = "4b510033f05532a85b267014b3f9d695"
self.assertFalse(
medusa.storage.abstract_storage.AbstractStorage.hashes_match(hash1, hash2)
)
def test_generate_md5_hash(self):
with tempfile.NamedTemporaryFile() as tf:
two_megabytes = 2 * 1024 * 1024
tf.write(os.urandom(two_megabytes))
tf.flush()
tf.seek(0)
checksum_full = hashlib.md5(tf.read()).digest()
digest_full = base64.encodestring(checksum_full).decode('UTF-8').strip()
tf.seek(0)
digest_chunk = generate_md5_hash(tf.name)
self.assertEqual(digest_chunk, digest_full)
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=128))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=256))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=1024))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=100000000))
tf.seek(0)
self.assertEqual(digest_full, generate_md5_hash(tf.name, block_size=-1))
tf.seek(0)
self.assertNotEqual(digest_full, generate_md5_hash(tf.name, block_size=0))
def test_get_object_datetime(self):
file1_content = "content of the test file1"
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
obj = self.storage.storage_driver.get_blob("test_download_blobs1/file1.txt")
self.assertEqual(
datetime.datetime.fromtimestamp(int(obj.extra["modify_time"])),
self.storage.storage_driver.get_object_datetime(obj)
)
def test_get_fqdn_from_backup_index_blob(self):
blob_name = "index/backup_index/2019051307/manifest_node1.whatever.com.json"
self.assertEqual(
"node1.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/backup_index/2019051307/schema_node2.whatever.com.cql"
self.assertEqual(
"node2.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/backup_index/2019051307/schema_node3.whatever.com.txt"
self.assertEqual(
"node3.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/backup_index/2019051307/schema_node_with_underscores.whatever.com.txt"
self.assertEqual(
"node_with_underscores.whatever.com",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
def test_get_fqdn_from_any_index_blob(self):
blob_name = "tokenmap_hostname-with-dashes-and-3-numbers.json"
self.assertEqual(
"hostname-with-dashes-and-3-numbers",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "tokenmap_hostname-with-dashes.and-dots.json"
self.assertEqual(
"hostname-with-dashes.and-dots",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "tokenmap_hostname_with-underscores.and-dots-and.dashes.json"
self.assertEqual(
"hostname_with-underscores.and-dots-and.dashes",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
blob_name = "index/bi/third_backup/finished_localhost_1574343029.timestamp"
self.assertEqual(
"localhost",
self.storage.get_fqdn_from_any_index_blob(blob_name)
)
def test_parse_backup_index(self):
file_content = "content of the test file"
self.storage.storage_driver.upload_blob_from_string("node1/backup1/data/ks1/sstable1.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/data/ks1/sstable2.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/tokenmap.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/manifest.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup1/meta/schema.cql", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/data/ks1/sstable1.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/data/ks1/sstable2.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/tokenmap.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/manifest.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node2/backup1/meta/schema.cql", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/data/ks1/sstable1.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/data/ks1/sstable2.db", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/tokenmap.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/manifest.json", file_content)
self.storage.storage_driver.upload_blob_from_string("node1/backup2/meta/schema.cql", file_content)
build_indices(self.config, False)
path = 'index/backup_index'
backup_index = self.storage.storage_driver.list_objects(path)
blobs_by_backup = self.storage.group_backup_index_by_backup_and_node(backup_index)
self.assertTrue("backup1" in blobs_by_backup)
self.assertTrue("backup2" in blobs_by_backup)
self.assertTrue("node1" in blobs_by_backup["backup1"])
self.assertTrue("node2" in blobs_by_backup["backup1"])
self.assertTrue("node1" in blobs_by_backup["backup2"])
self.assertFalse("node2" in blobs_by_backup["backup2"])
def test_remove_extension(self):
self.assertEqual(
'localhost',
self.storage.remove_extension('localhost.txt')
)
self.assertEqual(
'localhost',
self.storage.remove_extension('localhost.timestamp')
)
self.assertEqual(
'localhost',
self.storage.remove_extension('localhost.cql')
)
self.assertEqual(
'localhost.foo',
self.storage.remove_extension('localhost.foo')
)
def test_get_timestamp_from_blob_name(self):
self.assertEqual(
1558021519,
self.storage.get_timestamp_from_blob_name('finished_localhost_1558021519.timestamp')
)
self.assertEqual(
1558021519,
self.storage.get_timestamp_from_blob_name('finished_some.host.net_1558021519.timestamp')
)
self.assertEqual(
1558021519,
self.storage.get_timestamp_from_blob_name('finished_some_underscores.host.net_1558021519.timestamp')
)
self.assertEqual(
1574343029,
self.storage.get_timestamp_from_blob_name('index/bi/third_backup/finished_localhost_1574343029.timestamp')
)
if __name__ == '__main__':
unittest.main()
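For readers without the medusa sources at hand, the helper under test can be pictured roughly as follows; this is a minimal editorial sketch, not medusa's actual implementation, and only the name generate_md5_hash and the block_size semantics are taken from the assertions above.
import base64
import hashlib
def generate_md5_hash_sketch(filename, block_size=65536):
    # Hash the file in block_size chunks; f.read(-1) and any positive size
    # eventually drain the file, while f.read(0) returns b'' immediately.
    md5 = hashlib.md5()
    with open(filename, 'rb') as f:
        data = f.read(block_size)
        while data:
            md5.update(data)
            data = f.read(block_size)
    # Base64-encode the raw digest, matching the expectations in the tests.
    return base64.encodebytes(md5.digest()).decode('UTF-8').strip()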
| true
| true
|
790c25789e022e8cde45afd367a9a0a195f53846
| 401
|
py
|
Python
|
src/stronghold/rosalind_qrt.py
|
cowboysmall/rosalind
|
021e4392a8fc946b97bbf86bbb8227b28bb5e462
|
[
"MIT"
] | null | null | null |
src/stronghold/rosalind_qrt.py
|
cowboysmall/rosalind
|
021e4392a8fc946b97bbf86bbb8227b28bb5e462
|
[
"MIT"
] | null | null | null |
src/stronghold/rosalind_qrt.py
|
cowboysmall/rosalind
|
021e4392a8fc946b97bbf86bbb8227b28bb5e462
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import phylogeny
def main(argv):
lines = files.read_lines(argv[0])
taxa = lines[0].split()
table = lines[1:]
    print('\n'.join('{%s, %s} {%s, %s}' % (a1, a2, b1, b2) for ((a1, a2), (b1, b2)) in phylogeny.quartets(taxa, table)))
if __name__ == "__main__":
main(sys.argv[1:])
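The heavy lifting happens in the project-local phylogeny.quartets helper, which this record does not include. A hypothetical sketch of the Rosalind QRT idea it presumably implements (every pair of taxa marked '1' in a table row against every pair marked '0'):
from itertools import combinations
def quartets_sketch(taxa, table):
    # For each row of the partial character table, pair up taxa marked '1'
    # and taxa marked '0'; each (one-pair, zero-pair) combination is a quartet.
    for row in table:
        ones = [t for t, c in zip(taxa, row) if c == '1']
        zeros = [t for t, c in zip(taxa, row) if c == '0']
        for a in combinations(ones, 2):
            for b in combinations(zeros, 2):
                yield a, b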
| 21.105263
| 119
| 0.610973
|
| false
| true
|
790c259407daddb30593df39ad91cf4f0684b38d
| 4,131
|
py
|
Python
|
saleor/checkout/core.py
|
spartonia/saleor
|
88c54f58f9318e1bc3de3ab13a0fcf479cb533bb
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/checkout/core.py
|
spartonia/saleor
|
88c54f58f9318e1bc3de3ab13a0fcf479cb533bb
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/checkout/core.py
|
spartonia/saleor
|
88c54f58f9318e1bc3de3ab13a0fcf479cb533bb
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import defaultdict
from django.conf import settings
from prices import Price
from satchless.process import ProcessManager
from .steps import ShippingAddressStep, ShippingMethodStep, SummaryStep, DetailsStep
from ..cart import Cart
from ..core import analytics
from ..order.models import Order
STORAGE_SESSION_KEY = 'checkout_storage'
class CheckoutStorage(defaultdict):
modified = False
def __init__(self, *args, **kwargs):
super(CheckoutStorage, self).__init__(dict, *args, **kwargs)
class Checkout(ProcessManager):
steps = None
def __init__(self, request):
self.request = request
self.steps = []
try:
self.storage = CheckoutStorage(
request.session[STORAGE_SESSION_KEY])
except KeyError:
self.storage = CheckoutStorage()
self.cart = Cart.for_session_cart(request.cart,
discounts=request.discounts)
self.generate_steps(self.cart)
def __iter__(self):
return iter(self.steps)
def generate_steps(self, cart):
self.cart = cart
self.details_step = DetailsStep(
self.request, self.storage['details'], checkout=self)
if self.is_shipping_required():
self.shipping_address_step = ShippingAddressStep(
self.request, self.storage['shipping_address'], checkout=self)
shipping_address = self.shipping_address_step.address
self.steps.append(self.shipping_address_step)
self.shipping_method_step = ShippingMethodStep(
self.request, self.storage['shipping_method'],
shipping_address, self.cart, checkout=self)
self.steps.append(self.shipping_method_step)
else:
shipping_address = None
self.shipping_address_step = None
self.shipping_method_step = None
summary_step = SummaryStep(self.request, self.storage['summary'],
shipping_address, checkout=self)
self.steps.append(summary_step)
def get_total(self, **kwargs):
zero = Price(0, currency=settings.DEFAULT_CURRENCY)
cost_iterator = (total_with_shipping
for shipping, shipping_cost, total_with_shipping
in self.get_deliveries(**kwargs))
total = sum(cost_iterator, zero)
return total
def save(self):
self.request.session[STORAGE_SESSION_KEY] = dict(self.storage)
def clear_storage(self):
try:
del self.request.session[STORAGE_SESSION_KEY]
except KeyError:
pass
self.cart.clear()
def is_shipping_required(self):
return self.cart.is_shipping_required()
def get_deliveries(self, **kwargs):
for partition in self.cart.partition():
if (self.shipping_address_step and
self.shipping_method_step.shipping_method):
shipping_method = self.shipping_method_step.shipping_method
shipping_cost = shipping_method.get_delivery_total(partition)
else:
shipping_cost = Price(0, currency=settings.DEFAULT_CURRENCY)
total_with_shipping = partition.get_total(**kwargs) + shipping_cost
yield partition, shipping_cost, total_with_shipping
def create_order(self):
order = Order()
if self.request.user.is_authenticated():
order.user = self.request.user
for step in self.steps:
step.add_to_order(order)
if self.request.user.is_authenticated():
order.anonymous_user_email = ''
order.tracking_client_id = analytics.get_client_id(self.request)
order.total = self.get_total()
order.save()
return order
def available_steps(self):
available = []
for step in self:
step.is_step_available = True
available.append(step)
if not self.validate_step(step):
break
step.is_step_valid = True
return available
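A hypothetical sketch of how a Django view might drive this process manager; the view name, the step.process() call, and the 'order-details' URL name are assumptions, not part of this module.
from django.shortcuts import redirect
def checkout_view(request):
    # Resume at the first step that has not validated yet; once every step
    # is valid, turn the cart into an order and clear the session storage.
    checkout = Checkout(request)
    for step in checkout.available_steps():
        if not getattr(step, 'is_step_valid', False):
            return step.process()  # assumed step API: renders/handles its own form
    order = checkout.create_order()
    checkout.clear_storage()
    return redirect('order-details')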
| 35.307692
| 84
| 0.635924
|
| true
| true
|
790c2691f57b39297379cb70543eb72f9152d12e
| 7,923
|
py
|
Python
|
lists_of_terms/shodule_for_lists_of_terms.py
|
ShawnJSavoie2/ToBeRedone
|
be1981969deb5ebb083e7ef2592e331746af757a
|
[
"MIT"
] | null | null | null |
lists_of_terms/shodule_for_lists_of_terms.py
|
ShawnJSavoie2/ToBeRedone
|
be1981969deb5ebb083e7ef2592e331746af757a
|
[
"MIT"
] | null | null | null |
lists_of_terms/shodule_for_lists_of_terms.py
|
ShawnJSavoie2/ToBeRedone
|
be1981969deb5ebb083e7ef2592e331746af757a
|
[
"MIT"
] | null | null | null |
# IDLE (Python 3.8.0)
# module_for_lists_of_terms
def termal_generator(lict):
length_of_termal_generator = 16
padding = length_of_termal_generator - len(lict)
count = padding
while count != 0:
lict.append([''])
count = count - 1
termal_lict = []
for first_inner in lict[0]:
for second_inner in lict[1]:
for third_inner in lict[2]:
for fourth_inner in lict[3]:
for fifth_inner in lict[4]:
for sixth_inner in lict[5]:
for seventh_inner in lict[6]:
for eighth_inner in lict[7]:
for ninth_inner in lict[8]:
for tenth_inner in lict[9]:
for eleventh_inner in lict[10]:
for twelfth_inner in lict[11]:
for thirteenth_inner in lict[12]:
                                                        for fourteenth_inner in lict[13]:
                                                            for fifteenth_inner in lict[14]:
for sixteenth_inner in lict[15]:
term = (
first_inner + second_inner +
third_inner + fourth_inner +
fifth_inner + sixth_inner +
seventh_inner + eighth_inner +
ninth_inner + tenth_inner +
eleventh_inner + twelfth_inner +
thirteenth_inner + fourteenth_inner +
fifteenth_inner + sixteenth_inner
)
termal_lict.append(term)
return termal_lict
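# Editorial sketch (not part of the original module): the sixteen nested
# loops above compute a Cartesian product of the padded lists, which
# itertools expresses directly. A hypothetical equivalent:
def termal_generator_via_product(lict):
    from itertools import product
    length_of_termal_generator = 16
    # Pad with single empty strings, as above, but without mutating the input.
    padded = lict + [['']] * (length_of_termal_generator - len(lict))
    return [''.join(parts) for parts in product(*padded)]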
def user_input_handling_function_second(dictionary):
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
lict = []
while good_to_go == 'no':
for key in dictionary:
lict.append(key)
for element in user_input:
if element not in lict:
print('The form can only contain a combination of the characters that represent the lists of characters.')
errors.append('yes')
break
if len(user_input) < 2:
print('The form is too short. It can\'t be less than two-characters long.')
errors.append('yes')
if len(user_input) > 8:
print('The form is too long. It can\'t be more than eight-characters long.')
errors.append('yes')
if 'yes' in errors:
good_to_go = 'no'
errors = []
print()
user_input = input('Re-enter: ')
print()
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_third():
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
yes_or_no = ['yes', 'no']
while good_to_go == 'no':
if user_input not in yes_or_no:
print('You have to answer yes or no.')
errors.append('yes')
if 'yes' in errors:
good_to_go = 'no'
errors = []
print()
user_input = input('Re-enter: ')
print()
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_fourth(dictionary):
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
while good_to_go == 'no':
if user_input not in dictionary:
print('The form you entered does not match one of the forms in your termal_dictionary. Each form in your')
print('termal_dictionary is a name (key) that has an associated definition (value) that is a list of terms')
print('that all have the same form as the name (key).')
errors.append('yes')
if 'yes' in errors:
good_to_go = 'no'
errors = []
print()
user_input = input('Re-enter: ')
print()
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_eighth():
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-']
while good_to_go == 'no':
if user_input == 'None':
user_input = None
return user_input
else:
for inner in user_input:
if inner not in digits:
print('The number must be an integer that consists of digits. For example: 1, -2, etc. or the keyword:')
print('None.')
errors.append('yes')
break
if 'yes' in errors:
good_to_go = 'no'
errors = []
print()
user_input = input('Re-enter: ')
print()
else:
good_to_go = 'yes'
return int(user_input)
def user_input_handling_function_ninth():
''' a parser '''
print()
user_input = input('Enter: ')
print()
term = ''
lict = []
for element in user_input:
if element != ' ':
term = term + element
else:
lict.append(term)
term = ''
lict.append(term) # because term might not be empty....
return lict
def user_input_handling_function_tenth(dictionary):
''' a dictionary checker '''
user_input = user_input_handling_function_ninth()
good_to_go = 'no'
errors = []
while good_to_go == 'no':
string = ''
lict = []
for element in user_input:
string = string + element
for key in dictionary:
for element in dictionary[key]:
lict.append(element)
for element in string:
if element not in lict:
print('One of your unwanted characters or combination of characters does not match the characters you')
print('entered earlier.')
errors.append('yes')
break
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
def print_vertical_lict(lict):
for element in lict:
print(element)
def print_horizontal_lict(lict):
string = ''
for element in lict:
string = string + str(element) + ', '
print(string)
print()
def write_vertical_lict(file_name, lict):
file = open(file_name, 'w')
for element in lict:
element = str(element) + '\n'
file.write(element)
file.close()
def write_horizontal_lict(file_name, lict):
if '.txt' not in file_name:
file_name = file_name + '.txt'
row = ''
for index in range(len(lict)):
lict[index] = str(lict[index]) + ', '
if len(row + lict[index]) > 100:
lict[index - 1] = lict[index - 1] + '\n'
row = lict[index]
else:
row = row + lict[index]
file = open(file_name, 'w')
for term in lict:
file.write(term)
file.close()
| 35.370536
| 124
| 0.47015
|
| true
| true
|
790c2779c0128cf69b56a25da075f3e3f2db55a4
| 61,949
|
py
|
Python
|
venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py
|
aburan28/ansible-devops-pipeline
|
50aa801632ca0828c16faac55732f1e79085f932
|
[
"Apache-2.0"
] | 1
|
2019-04-16T21:23:15.000Z
|
2019-04-16T21:23:15.000Z
|
venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py
|
aburan28/ansible-devops-pipeline
|
50aa801632ca0828c16faac55732f1e79085f932
|
[
"Apache-2.0"
] | 5
|
2020-02-26T20:10:50.000Z
|
2021-09-23T23:23:18.000Z
|
venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py
|
aburan28/ansible-devops-pipeline
|
50aa801632ca0828c16faac55732f1e79085f932
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Copyright: (c) 2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: yum
version_added: historical
short_description: Manages packages with the I(yum) package manager
description:
  - Installs, upgrades, downgrades, removes, and lists packages and groups with the I(yum) package manager.
- This module only works on Python 2. If you require Python 3 support see the M(dnf) module.
options:
use_backend:
description:
- This module supports C(yum) (as it always has), this is known as C(yum3)/C(YUM3)/C(yum-deprecated) by
upstream yum developers. As of Ansible 2.7+, this module also supports C(YUM4), which is the
"new yum" and it has an C(dnf) backend.
- By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
required: false
default: "auto"
choices: [ auto, yum, yum4, dnf ]
version_added: "2.7"
name:
description:
- A package name or package specifier with version, like C(name-1.0).
- If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
See the C(allow_downgrade) documentation for caveats with downgrading packages.
- When using state=latest, this can be C('*') which means run C(yum -y update).
      - You can also pass a URL or a local path to an RPM file (using state=present).
To operate on several packages this can accept a comma separated string of packages or (as of 2.0) a list of packages.
aliases: [ pkg ]
exclude:
description:
      - Package name(s) to exclude when state=present or latest.
version_added: "2.0"
list:
description:
- "Package name to run the equivalent of yum list <package> against. In addition to listing packages,
use can also list the following: C(installed), C(updates), C(available) and C(repos)."
state:
description:
- Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
- C(present) and C(installed) will simply ensure that a desired package is installed.
- C(latest) will update the specified package if it's not of the latest available version.
- C(absent) and C(removed) will remove the specified package.
      - Default is C(None); in effect the default action is C(present) unless the C(autoremove) option is
        enabled for this module, in which case C(absent) is inferred.
choices: [ absent, installed, latest, present, removed ]
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
      - As of Ansible 2.7, this can alternatively be a list instead of a
        C(",")-separated string.
version_added: "0.9"
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
      - As of Ansible 2.7, this can alternatively be a list instead of a
        C(",")-separated string.
version_added: "0.9"
conf_file:
description:
- The remote yum configuration file to use for the transaction.
version_added: "0.6"
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
version_added: "1.2"
skip_broken:
description:
      - Skip packages with broken dependencies (depsolve) that are causing problems.
type: bool
default: "no"
version_added: "2.3"
update_cache:
description:
- Force yum to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "1.9"
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
      - This should only be set to C(no) when used on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(yes).
type: bool
default: "yes"
version_added: "2.1"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
required: false
default: "no"
type: bool
version_added: "2.5"
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
default: "/"
version_added: "2.3"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
type: bool
default: "no"
version_added: "2.4"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
required: false
default: "no"
version_added: "2.6"
allow_downgrade:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.4"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
required: false
version_added: "2.5"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
required: false
version_added: "2.5"
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
required: false
version_added: "2.7"
default: null
autoremove:
description:
- If C(yes), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when state is I(absent)
- "NOTE: This feature requires yum >= 3.4.3 (RHEL/CentOS 7+)"
type: bool
default: false
version_added: "2.7"
disable_excludes:
description:
- Disable the excludes defined in YUM config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
required: false
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
required: false
default: "no"
type: bool
version_added: "2.7"
notes:
  - When used with a `loop:` each package will be processed individually;
    it is much more efficient to pass the list directly to the `name` option.
- In versions prior to 1.9.2 this module installed and removed each package
given to the yum module separately. This caused problems when packages
specified by filename or url had to be installed or removed together. In
1.9.2 this was fixed so that packages are installed in one yum
transaction. However, if one of the packages adds a new yum repository
that the other packages come from (such as epel-release) then that package
needs to be installed in a separate task. This mimics yum's command line
behaviour.
- 'Yum itself has two types of groups. "Package groups" are specified in the
rpm itself while "environment groups" are specified in a separate file
(usually by the distribution). Unfortunately, this division becomes
apparent to ansible users because ansible needs to operate on the group
of packages in a single transaction and yum requires groups to be specified
in different ways when used in that way. Package groups are specified as
"@development-tools" and environment groups are "@^gnome-desktop-environment".
Use the "yum group list" command to see which category of group the group
you want to install falls into.'
# informational: requirements for nodes
requirements:
- yum
author:
- Ansible Core Team
- Seth Vidal
- Eduard Snesarev (@verm666)
- Berend De Schouwer (@berenddeschouwer)
- Abhijeet Kasurde (@Akasurde)
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: install the latest version of Apache
yum:
name: httpd
state: latest
- name: ensure a list of packages installed
yum:
name: "{{ packages }}"
vars:
packages:
- httpd
- httpd-tools
- name: remove the Apache package
yum:
name: httpd
state: absent
- name: install the latest version of Apache from the testing repo
yum:
name: httpd
enablerepo: testing
state: present
- name: install one specific version of Apache
yum:
name: httpd-2.2.29-1.4.amzn1
state: present
- name: upgrade all packages
yum:
name: '*'
state: latest
- name: upgrade all packages, excluding kernel & foo related packages
yum:
name: '*'
state: latest
exclude: kernel*,foo*
- name: install the nginx rpm from a remote repo
yum:
name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install nginx rpm from a local file
yum:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install the 'Development tools' package group
yum:
name: "@Development tools"
state: present
- name: install the 'Gnome desktop' environment group
yum:
name: "@^gnome-desktop-environment"
state: present
- name: List ansible packages and register result to print with debug later.
yum:
list: ansible
register: result
- name: Install package with multiple repos enabled
yum:
name: sos
enablerepo: "epel,ol7_latest"
- name: Install package with multiple repos disabled
yum:
name: sos
disablerepo: "epel,ol7_latest"
- name: Install a list of packages
yum:
name:
- nginx
- postgresql
- postgresql-server
state: present
- name: Download the nginx package but do not install it
yum:
name:
- nginx
state: latest
download_only: true
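# Additional illustrative tasks (editorial sketch based on the option notes above; repo ids are placeholders)
- name: install a package with several repos enabled, using the 2.7+ list form
  yum:
    name: sos
    enablerepo:
      - epel
      - ol7_latest
- name: apply security-related updates only
  yum:
    name: '*'
    state: latest
    security: yes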
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
import os
import re
import tempfile
try:
import rpm
HAS_RPM_PYTHON = True
except ImportError:
HAS_RPM_PYTHON = False
try:
import yum
HAS_YUM_PYTHON = True
except ImportError:
HAS_YUM_PYTHON = False
try:
from yum.misc import find_unfinished_transactions, find_ts_remaining
from rpmUtils.miscutils import splitFilename, compareEVR
transaction_helpers = True
except ImportError:
transaction_helpers = False
from contextlib import contextmanager
def_qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}"
rpmbin = None
# 64k. Number of bytes to read at a time when manually downloading pkgs via a url
BUFSIZE = 65536
class YumModule(YumDnf):
"""
Yum Ansible module back-end implementation
"""
def __init__(self, module):
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
# This populates instance vars for all argument spec params
super(YumModule, self).__init__(module)
def fetch_rpm_from_url(self, spec):
# FIXME: Remove this once this PR is merged:
# https://github.com/ansible/ansible/pull/19172
# download package so that we can query it
package_name, dummy = os.path.splitext(str(spec.rsplit('/', 1)[1]))
package_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, prefix=package_name, suffix='.rpm', delete=False)
self.module.add_cleanup_file(package_file.name)
try:
rsp, info = fetch_url(self.module, spec)
if not rsp:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, info['msg']))
data = rsp.read(BUFSIZE)
while data:
package_file.write(data)
data = rsp.read(BUFSIZE)
package_file.close()
except Exception as e:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, to_native(e)))
return package_file.name
def yum_base(self):
my = yum.YumBase()
my.preconf.debuglevel = 0
my.preconf.errorlevel = 0
my.preconf.plugins = True
my.preconf.enabled_plugins = self.enable_plugin
my.preconf.disabled_plugins = self.disable_plugin
if self.releasever:
my.preconf.releasever = self.releasever
if self.installroot != '/':
            # do not set up installroot by default, because of the error
            # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
            # in old yum versions (like in CentOS 6.6)
my.preconf.root = self.installroot
my.conf.installroot = self.installroot
if self.conf_file and os.path.exists(self.conf_file):
my.preconf.fn = self.conf_file
if os.geteuid() != 0:
if hasattr(my, 'setCacheDir'):
my.setCacheDir()
else:
cachedir = yum.misc.getCacheDir()
my.repos.setCacheDir(cachedir)
my.conf.cache = 0
if self.disable_excludes:
my.conf.disable_excludes = self.disable_excludes
return my
def po_to_envra(self, po):
if hasattr(po, 'ui_envra'):
return po.ui_envra
return '%s:%s-%s-%s.%s' % (po.epoch, po.name, po.version, po.release, po.arch)
def is_group_env_installed(self, name):
name_lower = name.lower()
my = self.yum_base()
if yum.__version_info__ >= (3, 4):
groups_list = my.doGroupLists(return_evgrps=True)
else:
groups_list = my.doGroupLists()
# list of the installed groups on the first index
groups = groups_list[0]
for group in groups:
if name_lower.endswith(group.name.lower()) or name_lower.endswith(group.groupid.lower()):
return True
if yum.__version_info__ >= (3, 4):
# list of the installed env_groups on the third index
envs = groups_list[2]
for env in envs:
if name_lower.endswith(env.name.lower()) or name_lower.endswith(env.environmentid.lower()):
return True
return False
def is_installed(self, repoq, pkgspec, qf=None, is_pkg=False):
if qf is None:
qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}\n"
if not repoq:
pkgs = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
e, m, _ = my.rpmdb.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs and not is_pkg:
pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return [self.po_to_envra(p) for p in pkgs]
else:
global rpmbin
if not rpmbin:
rpmbin = self.module.get_bin_path('rpm', required=True)
cmd = [rpmbin, '-q', '--qf', qf, pkgspec]
if self.installroot != '/':
cmd.extend(['--root', self.installroot])
# rpm localizes messages and we're screen scraping so make sure we use
# the C locale
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
if rc != 0 and 'is not installed' not in out:
self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err))
if 'is not installed' in out:
out = ''
pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()]
if not pkgs and not is_pkg:
cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec]
if self.installroot != '/':
cmd.extend(['--root', self.installroot])
rc2, out2, err2 = self.module.run_command(cmd, environ_update=lang_env)
else:
rc2, out2, err2 = (0, '', '')
if rc2 != 0 and 'no package provides' not in out2:
self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2))
if 'no package provides' in out2:
out2 = ''
pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()]
return pkgs
return []
def is_available(self, repoq, pkgspec, qf=def_qf):
if not repoq:
pkgs = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
e, m, _ = my.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs:
pkgs.extend(my.returnPackagesByDep(pkgspec))
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return [self.po_to_envra(p) for p in pkgs]
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, pkgspec]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return [p for p in out.split('\n') if p.strip()]
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return []
def is_update(self, repoq, pkgspec, qf=def_qf):
if not repoq:
pkgs = []
updates = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec)
if not pkgs:
e, m, _ = my.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
updates = my.doPackageLists(pkgnarrow='updates').updates
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
retpkgs = (pkg for pkg in pkgs if pkg in updates)
return set(self.po_to_envra(p) for p in retpkgs)
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return set(p for p in out.split('\n') if p.strip())
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return set()
def what_provides(self, repoq, req_spec, qf=def_qf):
if not repoq:
pkgs = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
try:
pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
except Exception as e:
# If a repo with `repo_gpgcheck=1` is added and the repo GPG
                    # key was never accepted, querying this repo will throw an
# error: 'repomd.xml signature could not be verified'. In that
# situation we need to run `yum -y makecache` which will accept
# the key and try again.
if 'repomd.xml signature could not be verified' in to_native(e):
self.module.run_command(self.yum_basecmd + ['makecache'])
pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
else:
raise
if not pkgs:
e, m, _ = my.pkgSack.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
e, m, _ = my.rpmdb.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return set(self.po_to_envra(p) for p in pkgs)
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
rc, out, err = self.module.run_command(cmd)
cmd = myrepoq + ["--qf", qf, req_spec]
rc2, out2, err2 = self.module.run_command(cmd)
if rc == 0 and rc2 == 0:
out += out2
pkgs = set([p for p in out.split('\n') if p.strip()])
if not pkgs:
pkgs = self.is_installed(repoq, req_spec, qf=qf)
return pkgs
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
return set()
def transaction_exists(self, pkglist):
"""
checks the package list to see if any packages are
involved in an incomplete transaction
"""
conflicts = []
if not transaction_helpers:
return conflicts
# first, we create a list of the package 'nvreas'
# so we can compare the pieces later more easily
pkglist_nvreas = (splitFilename(pkg) for pkg in pkglist)
# next, we build the list of packages that are
# contained within an unfinished transaction
unfinished_transactions = find_unfinished_transactions()
for trans in unfinished_transactions:
steps = find_ts_remaining(trans)
for step in steps:
# the action is install/erase/etc., but we only
# care about the package spec contained in the step
(action, step_spec) = step
(n, v, r, e, a) = splitFilename(step_spec)
# and see if that spec is in the list of packages
# requested for installation/updating
for pkg in pkglist_nvreas:
# if the name and arch match, we're going to assume
# this package is part of a pending transaction
# the label is just for display purposes
label = "%s-%s" % (n, a)
if n == pkg[0] and a == pkg[4]:
if label not in conflicts:
conflicts.append("%s-%s" % (n, a))
break
return conflicts
def local_envra(self, path):
"""return envra of a local rpm passed in"""
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
fd = os.open(path, os.O_RDONLY)
try:
header = ts.hdrFromFdno(fd)
        except rpm.error:
return None
finally:
os.close(fd)
return '%s:%s-%s-%s.%s' % (
header[rpm.RPMTAG_EPOCH] or '0',
header[rpm.RPMTAG_NAME],
header[rpm.RPMTAG_VERSION],
header[rpm.RPMTAG_RELEASE],
header[rpm.RPMTAG_ARCH]
)
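    # Illustrative result (not original text): with no epoch set in the
    # header, the helper above yields e.g. '0:nginx-1.12.2-1.el7.ngx.x86_64'.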
@contextmanager
def set_env_proxy(self):
# setting system proxy environment and saving old, if exists
my = self.yum_base()
namepass = ""
proxy_url = ""
scheme = ["http", "https"]
old_proxy_env = [os.getenv("http_proxy"), os.getenv("https_proxy")]
try:
if my.conf.proxy:
if my.conf.proxy_username:
namepass = namepass + my.conf.proxy_username
proxy_url = my.conf.proxy
if my.conf.proxy_password:
namepass = namepass + ":" + my.conf.proxy_password
elif '@' in my.conf.proxy:
namepass = my.conf.proxy.split('@')[0].split('//')[-1]
proxy_url = my.conf.proxy.replace("{0}@".format(namepass), "")
if namepass:
namepass = namepass + '@'
for item in scheme:
os.environ[item + "_proxy"] = re.sub(
r"(http://)",
r"\1" + namepass, proxy_url
)
yield
except yum.Errors.YumBaseError:
raise
finally:
            # revert to the previous system configuration
for item in scheme:
if os.getenv("{0}_proxy".format(item)):
del os.environ["{0}_proxy".format(item)]
if old_proxy_env[0]:
os.environ["http_proxy"] = old_proxy_env[0]
if old_proxy_env[1]:
os.environ["https_proxy"] = old_proxy_env[1]
def pkg_to_dict(self, pkgstr):
if pkgstr.strip():
n, e, v, r, a, repo = pkgstr.split('|')
else:
return {'error_parsing': pkgstr}
d = {
'name': n,
'arch': a,
'epoch': e,
'release': r,
'version': v,
'repo': repo,
'envra': '%s:%s-%s-%s.%s' % (e, n, v, r, a)
}
if repo == 'installed':
d['yumstate'] = 'installed'
else:
d['yumstate'] = 'available'
return d
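    # Illustrative input/output (not from the original source):
    #   pkg_to_dict('httpd|0|2.4.6|93.el7|x86_64|installed') ->
    #   {'name': 'httpd', 'arch': 'x86_64', 'epoch': '0', 'release': '93.el7',
    #    'version': '2.4.6', 'repo': 'installed',
    #    'envra': '0:httpd-2.4.6-93.el7.x86_64', 'yumstate': 'installed'}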
def repolist(self, repoq, qf="%{repoid}"):
cmd = repoq + ["--qf", qf, "-a"]
rc, out, _ = self.module.run_command(cmd)
if rc == 0:
return set(p for p in out.split('\n') if p.strip())
else:
return []
def list_stuff(self, repoquerybin, stuff):
qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
# is_installed goes through rpm instead of repoquery so it needs a slightly different format
is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n"
repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
if self.disablerepo:
repoq.extend(['--disablerepo', ','.join(self.disablerepo)])
if self.enablerepo:
repoq.extend(['--enablerepo', ','.join(self.enablerepo)])
if self.installroot != '/':
repoq.extend(['--installroot', self.installroot])
if self.conf_file and os.path.exists(self.conf_file):
repoq += ['-c', self.conf_file]
if stuff == 'installed':
return [self.pkg_to_dict(p) for p in sorted(self.is_installed(repoq, '-a', qf=is_installed_qf)) if p.strip()]
if stuff == 'updates':
return [self.pkg_to_dict(p) for p in sorted(self.is_update(repoq, '-a', qf=qf)) if p.strip()]
if stuff == 'available':
return [self.pkg_to_dict(p) for p in sorted(self.is_available(repoq, '-a', qf=qf)) if p.strip()]
if stuff == 'repos':
return [dict(repoid=name, state='enabled') for name in sorted(self.repolist(repoq)) if name.strip()]
return [
self.pkg_to_dict(p) for p in
sorted(self.is_installed(repoq, stuff, qf=is_installed_qf) + self.is_available(repoq, stuff, qf=qf))
if p.strip()
]
def exec_install(self, items, action, pkgs, res):
cmd = self.yum_basecmd + [action] + pkgs
if self.module.check_mode:
self.module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
if rc == 1:
for spec in items:
# Fail on invalid urls:
if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
err = 'Package at %s could not be installed' % spec
self.module.fail_json(changed=False, msg=err, rc=rc)
res['rc'] = rc
res['results'].append(out)
res['msg'] += err
res['changed'] = True
if ('Nothing to do' in out and rc == 0) or ('does not have any packages' in err):
res['changed'] = False
if rc != 0:
res['changed'] = False
self.module.fail_json(**res)
# Fail if yum prints 'No space left on device' because that means some
# packages failed executing their post install scripts because of lack of
# free space (e.g. kernel package couldn't generate initramfs). Note that
# yum can still exit with rc=0 even if some post scripts didn't execute
# correctly.
if 'No space left on device' in (out or err):
res['changed'] = False
res['msg'] = 'No space left on device'
self.module.fail_json(**res)
# FIXME - if we did an install - go and check the rpmdb to see if it actually installed
# look for each pkg in rpmdb
# look for each pkg via obsoletes
return res
def install(self, items, repoq):
pkgs = []
downgrade_pkgs = []
res = {}
res['results'] = []
res['msg'] = ''
res['rc'] = 0
res['changed'] = False
for spec in items:
pkg = None
downgrade_candidate = False
# check if pkgspec is installed (if possible for idempotence)
if spec.endswith('.rpm'):
if '://' not in spec and not os.path.exists(spec):
res['msg'] += "No RPM file matching '%s' found on system" % spec
res['results'].append("No RPM file matching '%s' found on system" % spec)
res['rc'] = 127 # Ensure the task fails in with-loop
self.module.fail_json(**res)
if '://' in spec:
with self.set_env_proxy():
package = self.fetch_rpm_from_url(spec)
else:
package = spec
# most common case is the pkg is already installed
envra = self.local_envra(package)
if envra is None:
self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
installed_pkgs = self.is_installed(repoq, envra)
if installed_pkgs:
res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], package))
continue
(name, ver, rel, epoch, arch) = splitFilename(envra)
installed_pkgs = self.is_installed(repoq, name)
                # case of two packages with the same EVR but different archs, like x86_64 and i686
if len(installed_pkgs) == 2:
(cur_name0, cur_ver0, cur_rel0, cur_epoch0, cur_arch0) = splitFilename(installed_pkgs[0])
(cur_name1, cur_ver1, cur_rel1, cur_epoch1, cur_arch1) = splitFilename(installed_pkgs[1])
cur_epoch0 = cur_epoch0 or '0'
cur_epoch1 = cur_epoch1 or '0'
compare = compareEVR((cur_epoch0, cur_ver0, cur_rel0), (cur_epoch1, cur_ver1, cur_rel1))
if compare == 0 and cur_arch0 != cur_arch1:
for installed_pkg in installed_pkgs:
if installed_pkg.endswith(arch):
installed_pkgs = [installed_pkg]
if len(installed_pkgs) == 1:
installed_pkg = installed_pkgs[0]
(cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(installed_pkg)
cur_epoch = cur_epoch or '0'
compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
# compare > 0 -> higher version is installed
# compare == 0 -> exact version is installed
# compare < 0 -> lower version is installed
if compare > 0 and self.allow_downgrade:
downgrade_candidate = True
elif compare >= 0:
continue
# else: if there are more installed packages with the same name, that would mean
# kernel, gpg-pubkey or like, so just let yum deal with it and try to install it
pkg = package
# groups
elif spec.startswith('@'):
if self.is_group_env_installed(spec):
continue
pkg = spec
# range requires or file-requires or pkgname :(
else:
# most common case is the pkg is already installed and done
# short circuit all the bs - and search for it as a pkg in is_installed
# if you find it then we're done
if not set(['*', '?']).intersection(set(spec)):
installed_pkgs = self.is_installed(repoq, spec, is_pkg=True)
if installed_pkgs:
res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
continue
# look up what pkgs provide this
pkglist = self.what_provides(repoq, spec)
if not pkglist:
res['msg'] += "No package matching '%s' found available, installed or updated" % spec
res['results'].append("No package matching '%s' found available, installed or updated" % spec)
res['rc'] = 126 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# if any of the packages are involved in a transaction, fail now
# so that we don't hang on the yum operation later
conflicts = self.transaction_exists(pkglist)
if conflicts:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
res['rc'] = 125 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# if any of them are installed
# then nothing to do
found = False
for this in pkglist:
if self.is_installed(repoq, this, is_pkg=True):
found = True
res['results'].append('%s providing %s is already installed' % (this, spec))
break
# if the version of the pkg you have installed is not in ANY repo, but there are
# other versions in the repos (both higher and lower) then the previous checks won't work.
# so we check one more time. This really only works for pkgname - not for file provides or virt provides
# but virt provides should be all caught in what_provides on its own.
# highly irritating
if not found:
if self.is_installed(repoq, spec):
found = True
res['results'].append('package providing %s is already installed' % (spec))
if found:
continue
# Downgrade - The yum install command will only install or upgrade to a spec version, it will
# not install an older version of an RPM even if specified by the install spec. So we need to
# determine if this is a downgrade, and then use the yum downgrade command to install the RPM.
if self.allow_downgrade:
for package in pkglist:
# Get the NEVRA of the requested package using pkglist instead of spec because pkglist
# contains consistently-formatted package names returned by yum, rather than user input
# that is often not parsed correctly by splitFilename().
(name, ver, rel, epoch, arch) = splitFilename(package)
# Check if any version of the requested package is installed
inst_pkgs = self.is_installed(repoq, name, is_pkg=True)
if inst_pkgs:
(cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(inst_pkgs[0])
compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
if compare > 0:
downgrade_candidate = True
else:
downgrade_candidate = False
break
# If package needs to be installed/upgraded/downgraded, then pass in the spec
# we could get here if nothing provides it but that's not
# the error we're catching here
pkg = spec
if downgrade_candidate and self.allow_downgrade:
downgrade_pkgs.append(pkg)
else:
pkgs.append(pkg)
if downgrade_pkgs:
res = self.exec_install(items, 'downgrade', downgrade_pkgs, res)
if pkgs:
res = self.exec_install(items, 'install', pkgs, res)
return res
def remove(self, items, repoq):
pkgs = []
res = {}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
for pkg in items:
if pkg.startswith('@'):
installed = self.is_group_env_installed(pkg)
else:
installed = self.is_installed(repoq, pkg)
if installed:
pkgs.append(pkg)
else:
res['results'].append('%s is not installed' % pkg)
if pkgs:
if self.module.check_mode:
self.module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs))
# run an actual yum transaction
if self.autoremove:
cmd = self.yum_basecmd + ["autoremove"] + pkgs
else:
cmd = self.yum_basecmd + ["remove"] + pkgs
rc, out, err = self.module.run_command(cmd)
res['rc'] = rc
res['results'].append(out)
res['msg'] = err
if rc != 0:
if self.autoremove:
if 'No such command' not in out:
self.module.fail_json(msg='Version of YUM too old for autoremove: Requires yum 3.4.3 (RHEL/CentOS 7+)')
else:
self.module.fail_json(**res)
# compile the results into one batch. If anything is changed
# then mark changed
# at the end - if we've end up failed then fail out of the rest
# of the process
# at this point we check to see if the pkg is no longer present
for pkg in pkgs:
if pkg.startswith('@'):
installed = self.is_group_env_installed(pkg)
else:
installed = self.is_installed(repoq, pkg)
if installed:
self.module.fail_json(**res)
res['changed'] = True
return res
def run_check_update(self):
# run check-update to see if we have packages pending
rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'])
return rc, out, err
@staticmethod
def parse_check_update(check_update_output):
updates = {}
obsoletes = {}
# remove incorrect new lines in longer columns in output from yum check-update
# yum line wrapping can move the repo to the next line
#
# Meant to filter out sets of lines like:
# some_looooooooooooooooooooooooooooooooooooong_package_name 1:1.2.3-1.el7
# some-repo-label
#
# But it also needs to avoid catching lines like:
# Loading mirror speeds from cached hostfile
#
# ceph.x86_64 1:11.2.0-0.el7 ceph
# preprocess string and filter out empty lines so the regex below works
out = re.sub(r'\n[^\w]\W+(.*)', r' \1', check_update_output)
available_updates = out.split('\n')
# build update dictionary
for line in available_updates:
line = line.split()
# ignore irrelevant lines
# '*' in line matches lines like mirror lists:
# * base: mirror.corbina.net
# len(line) != 3 or 6 could be junk or a continuation
# len(line) = 6 is package obsoletes
#
# FIXME: what is the '.' not in line conditional for?
if '*' in line or len(line) not in [3, 6] or '.' not in line[0]:
continue
else:
pkg, version, repo = line[0], line[1], line[2]
name, dist = pkg.rsplit('.', 1)
updates.update({name: {'version': version, 'dist': dist, 'repo': repo}})
if len(line) == 6:
obsolete_pkg, obsolete_version, obsolete_repo = line[3], line[4], line[5]
obsolete_name, obsolete_dist = obsolete_pkg.rsplit('.', 1)
obsoletes.update({obsolete_name: {'version': obsolete_version, 'dist': obsolete_dist, 'repo': obsolete_repo}})
return updates, obsoletes
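    # Illustrative parse (derived from the comments above, not original text):
    #   the check-update line 'ceph.x86_64  1:11.2.0-0.el7  ceph' yields
    #   updates == {'ceph': {'version': '1:11.2.0-0.el7', 'dist': 'x86_64', 'repo': 'ceph'}}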
def latest(self, items, repoq):
res = {}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
pkgs = {}
pkgs['update'] = []
pkgs['install'] = []
updates = {}
obsoletes = {}
update_all = False
cmd = None
# determine if we're doing an update all
if '*' in items:
update_all = True
rc, out, err = self.run_check_update()
if rc == 0 and update_all:
res['results'].append('Nothing to do here, all packages are up to date')
return res
elif rc == 100:
updates, obsoletes = self.parse_check_update(out)
elif rc == 1:
res['msg'] = err
res['rc'] = rc
self.module.fail_json(**res)
if update_all:
cmd = self.yum_basecmd + ['update']
will_update = set(updates.keys())
will_update_from_other_package = dict()
else:
will_update = set()
will_update_from_other_package = dict()
for spec in items:
            # Some guesswork is involved with groups: 'update @<group>' will install the group if it is missing
if spec.startswith('@'):
pkgs['update'].append(spec)
will_update.add(spec)
continue
# check if pkgspec is installed (if possible for idempotence)
# localpkg
elif spec.endswith('.rpm') and '://' not in spec:
if not os.path.exists(spec):
res['msg'] += "No RPM file matching '%s' found on system" % spec
res['results'].append("No RPM file matching '%s' found on system" % spec)
res['rc'] = 127 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# get the pkg e:name-v-r.arch
envra = self.local_envra(spec)
if envra is None:
self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
# local rpm files can't be updated
if self.is_installed(repoq, envra):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
continue
# URL
elif '://' in spec:
# download package so that we can check if it's already installed
with self.set_env_proxy():
package = self.fetch_rpm_from_url(spec)
envra = self.local_envra(package)
if envra is None:
self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
# local rpm files can't be updated
if self.is_installed(repoq, envra):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
continue
# dep/pkgname - find it
else:
if self.is_installed(repoq, spec):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
pkglist = self.what_provides(repoq, spec)
# FIXME..? may not be desirable to throw an exception here if a single package is missing
if not pkglist:
res['msg'] += "No package matching '%s' found available, installed or updated" % spec
res['results'].append("No package matching '%s' found available, installed or updated" % spec)
res['rc'] = 126 # Ensure the task fails in with-loop
self.module.fail_json(**res)
nothing_to_do = True
for pkg in pkglist:
if spec in pkgs['install'] and self.is_available(repoq, pkg):
nothing_to_do = False
break
# this contains the full NVR and spec could contain wildcards
# or virtual provides (like "python-*" or "smtp-daemon") while
# updates contains name only.
pkgname, _, _, _, _ = splitFilename(pkg)
if spec in pkgs['update'] and pkgname in updates:
nothing_to_do = False
will_update.add(spec)
# Massage the updates list
if spec != pkgname:
# For reporting what packages would be updated more
# succinctly
will_update_from_other_package[spec] = pkgname
break
if not self.is_installed(repoq, spec) and self.update_only:
res['results'].append("Packages providing %s not installed due to update_only specified" % spec)
continue
if nothing_to_do:
res['results'].append("All packages providing %s are up to date" % spec)
continue
# if any of the packages are involved in a transaction, fail now
# so that we don't hang on the yum operation later
conflicts = self.transaction_exists(pkglist)
if conflicts:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
res['results'].append("The following packages have pending transactions: %s" % ", ".join(conflicts))
res['rc'] = 128 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# check_mode output
if self.module.check_mode:
to_update = []
for w in will_update:
if w.startswith('@'):
to_update.append((w, None))
elif w not in updates:
other_pkg = will_update_from_other_package[w]
to_update.append(
(
w,
'because of (at least) %s-%s.%s from %s' % (
other_pkg,
updates[other_pkg]['version'],
updates[other_pkg]['dist'],
updates[other_pkg]['repo']
)
)
)
else:
to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo'])))
if self.update_only:
res['changes'] = dict(installed=[], updated=to_update)
else:
res['changes'] = dict(installed=pkgs['install'], updated=to_update)
if will_update or pkgs['install']:
res['changed'] = True
if obsoletes:
res['obsoletes'] = obsoletes
return res
# run commands
if cmd: # update all
rc, out, err = self.module.run_command(cmd)
res['changed'] = True
elif self.update_only:
if pkgs['update']:
cmd = self.yum_basecmd + ['update'] + pkgs['update']
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
out_lower = out.strip().lower()
if not out_lower.endswith("no packages marked for update") and \
not out_lower.endswith("nothing to do"):
res['changed'] = True
else:
rc, out, err = [0, '', '']
elif pkgs['install'] or will_update and not self.update_only:
cmd = self.yum_basecmd + ['install'] + pkgs['install'] + pkgs['update']
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
out_lower = out.strip().lower()
if not out_lower.endswith("no packages marked for update") and \
not out_lower.endswith("nothing to do"):
res['changed'] = True
else:
rc, out, err = [0, '', '']
res['rc'] = rc
res['msg'] += err
res['results'].append(out)
if rc:
res['failed'] = True
if obsoletes:
res['obsoletes'] = obsoletes
return res
def ensure(self, repoq):
pkgs = self.names
# autoremove was provided without `name`
if not self.names and self.autoremove:
pkgs = []
self.state = 'absent'
if self.conf_file and os.path.exists(self.conf_file):
self.yum_basecmd += ['-c', self.conf_file]
if repoq:
repoq += ['-c', self.conf_file]
if self.skip_broken:
self.yum_basecmd.extend(['--skip-broken'])
if self.disablerepo:
self.yum_basecmd.extend(['--disablerepo=%s' % ','.join(self.disablerepo)])
if self.enablerepo:
self.yum_basecmd.extend(['--enablerepo=%s' % ','.join(self.enablerepo)])
if self.enable_plugin:
self.yum_basecmd.extend(['--enableplugin', ','.join(self.enable_plugin)])
if self.disable_plugin:
self.yum_basecmd.extend(['--disableplugin', ','.join(self.disable_plugin)])
if self.exclude:
e_cmd = ['--exclude=%s' % ','.join(self.exclude)]
self.yum_basecmd.extend(e_cmd)
if self.disable_excludes:
self.yum_basecmd.extend(['--disableexcludes=%s' % self.disable_excludes])
if self.download_only:
self.yum_basecmd.extend(['--downloadonly'])
if self.installroot != '/':
# do not setup installroot by default, because of error
# CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
# in old yum version (like in CentOS 6.6)
e_cmd = ['--installroot=%s' % self.installroot]
self.yum_basecmd.extend(e_cmd)
if self.state in ('installed', 'present', 'latest'):
""" The need of this entire if conditional has to be chalanged
this function is the ensure function that is called
in the main section.
This conditional tends to disable/enable repo for
install present latest action, same actually
can be done for remove and absent action
As solution I would advice to cal
try: my.repos.disableRepo(disablerepo)
and
try: my.repos.enableRepo(enablerepo)
right before any yum_cmd is actually called regardless
of yum action.
Please note that enable/disablerepo options are general
options, this means that we can call those with any action
option. https://linux.die.net/man/8/yum
This docstring will be removed together when issue: #21619
will be solved.
This has been triggered by: #19587
"""
if self.update_cache:
self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
my = self.yum_base()
try:
if self.disablerepo:
for rid in self.disablerepo:
my.repos.disableRepo(rid)
current_repos = my.repos.repos.keys()
if self.enablerepo:
try:
for rid in self.enablerepo:
my.repos.enableRepo(rid)
new_repos = my.repos.repos.keys()
for i in new_repos:
if i not in current_repos:
rid = my.repos.getRepo(i)
a = rid.repoXML.repoid # nopep8 - https://github.com/ansible/ansible/pull/21475#pullrequestreview-22404868
current_repos = new_repos
except yum.Errors.YumBaseError as e:
self.module.fail_json(msg="Error setting/accessing repos: %s" % to_native(e))
except yum.Errors.YumBaseError as e:
self.module.fail_json(msg="Error accessing repos: %s" % to_native(e))
if self.state == 'latest' or self.update_only:
if self.disable_gpg_check:
self.yum_basecmd.append('--nogpgcheck')
if self.security:
self.yum_basecmd.append('--security')
if self.bugfix:
self.yum_basecmd.append('--bugfix')
res = self.latest(pkgs, repoq)
elif self.state in ('installed', 'present'):
if self.disable_gpg_check:
self.yum_basecmd.append('--nogpgcheck')
res = self.install(pkgs, repoq)
elif self.state in ('removed', 'absent'):
res = self.remove(pkgs, repoq)
else:
# should be caught by AnsibleModule argument_spec
self.module.fail_json(
msg="we should never get here unless this all failed",
changed=False,
results='',
errors='unexpected state'
)
return res
@staticmethod
def has_yum():
return HAS_YUM_PYTHON
def run(self):
"""
actually execute the module code backend
"""
error_msgs = []
if not HAS_RPM_PYTHON:
error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
if not HAS_YUM_PYTHON:
error_msgs.append('The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
if self.disable_excludes and yum.__version_info__ < (3, 4):
self.module.fail_json(msg="'disable_includes' is available in yum version 3.4 and onwards.")
if error_msgs:
self.module.fail_json(msg='. '.join(error_msgs))
# fedora will redirect yum to dnf, which has incompatibilities
# with how this module expects yum to operate. If yum-deprecated
# is available, use that instead to emulate the old behaviors.
if self.module.get_bin_path('yum-deprecated'):
yumbin = self.module.get_bin_path('yum-deprecated')
else:
yumbin = self.module.get_bin_path('yum')
# need debug level 2 to get 'Nothing to do' for groupinstall.
self.yum_basecmd = [yumbin, '-d', '2', '-y']
repoquerybin = self.module.get_bin_path('repoquery', required=False)
if self.install_repoquery and not repoquerybin and not self.module.check_mode:
yum_path = self.module.get_bin_path('yum')
if yum_path:
self.module.run_command('%s -y install yum-utils' % yum_path)
repoquerybin = self.module.get_bin_path('repoquery', required=False)
if self.list:
if not repoquerybin:
self.module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.")
results = {'results': self.list_stuff(repoquerybin, self.list)}
else:
# If rhn-plugin is installed and no rhn-certificate is available on
# the system then users will see an error message using the yum API.
# Use repoquery in those cases.
my = self.yum_base()
# A sideeffect of accessing conf is that the configuration is
# loaded and plugins are discovered
my.conf
repoquery = None
try:
yum_plugins = my.plugins._plugins
except AttributeError:
pass
else:
if 'rhnplugin' in yum_plugins:
if repoquerybin:
repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
if self.installroot != '/':
repoquery.extend(['--installroot', self.installroot])
results = self.ensure(repoquery)
if repoquery:
results['msg'] = '%s %s' % (
results.get('msg', ''),
'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.'
)
self.module.exit_json(**results)
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'yum', 'yum4', 'dnf'])
module = AnsibleModule(
**yumdnf_argument_spec
)
module_implementation = YumModule(module)
module_implementation.run()
if __name__ == '__main__':
main()
| 39.533504
| 160
| 0.557216
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: yum
version_added: historical
short_description: Manages packages with the I(yum) package manager
description:
  - Installs, upgrades, downgrades, removes, and lists packages and groups with the I(yum) package manager.
- This module only works on Python 2. If you require Python 3 support see the M(dnf) module.
options:
use_backend:
description:
      - This module supports C(yum) (as it always has); this is known as C(yum3)/C(YUM3)/C(yum-deprecated) by
        upstream yum developers. As of Ansible 2.7+, this module also supports C(YUM4), which is the
        "new yum" and has a C(dnf) backend.
- By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
required: false
default: "auto"
choices: [ auto, yum, yum4, dnf ]
version_added: "2.7"
name:
description:
- A package name or package specifier with version, like C(name-1.0).
- If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
See the C(allow_downgrade) documentation for caveats with downgrading packages.
- When using state=latest, this can be C('*') which means run C(yum -y update).
- You can also pass a url or a local path to a rpm file (using state=present).
To operate on several packages this can accept a comma separated string of packages or (as of 2.0) a list of packages.
aliases: [ pkg ]
exclude:
description:
- Package name(s) to exclude when state=present, or latest
version_added: "2.0"
list:
description:
- "Package name to run the equivalent of yum list <package> against. In addition to listing packages,
use can also list the following: C(installed), C(updates), C(available) and C(repos)."
state:
description:
- Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
- C(present) and C(installed) will simply ensure that a desired package is installed.
- C(latest) will update the specified package if it's not of the latest available version.
- C(absent) and C(removed) will remove the specified package.
      - Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
        enabled for this module, then C(absent) is inferred.
choices: [ absent, installed, latest, present, removed ]
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
version_added: "0.9"
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
version_added: "0.9"
conf_file:
description:
- The remote yum configuration file to use for the transaction.
version_added: "0.6"
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
version_added: "1.2"
skip_broken:
description:
      - Skip packages with broken dependencies (depsolve) that are causing problems.
type: bool
default: "no"
version_added: "2.3"
update_cache:
description:
- Force yum to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "1.9"
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
      - This should only be set to C(no) when used on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(yes).
type: bool
default: "yes"
version_added: "2.1"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
required: false
default: "no"
type: bool
version_added: "2.5"
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
default: "/"
version_added: "2.3"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
type: bool
default: "no"
version_added: "2.4"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
required: false
default: "no"
version_added: "2.6"
allow_downgrade:
description:
      - Specify if the named package and version is allowed to downgrade
        a possibly already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.4"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
required: false
version_added: "2.5"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
required: false
version_added: "2.5"
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
required: false
version_added: "2.7"
default: null
autoremove:
description:
- If C(yes), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
        required by any such package. Should be used alone or when state is I(absent).
- "NOTE: This feature requires yum >= 3.4.3 (RHEL/CentOS 7+)"
type: bool
default: false
version_added: "2.7"
disable_excludes:
description:
- Disable the excludes defined in YUM config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
required: false
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
required: false
default: "no"
type: bool
version_added: "2.7"
notes:
  - When used with a `loop:`, each package will be processed individually;
    it is much more efficient to pass the list directly to the `name` option.
- In versions prior to 1.9.2 this module installed and removed each package
given to the yum module separately. This caused problems when packages
specified by filename or url had to be installed or removed together. In
1.9.2 this was fixed so that packages are installed in one yum
transaction. However, if one of the packages adds a new yum repository
that the other packages come from (such as epel-release) then that package
needs to be installed in a separate task. This mimics yum's command line
behaviour.
- 'Yum itself has two types of groups. "Package groups" are specified in the
rpm itself while "environment groups" are specified in a separate file
(usually by the distribution). Unfortunately, this division becomes
apparent to ansible users because ansible needs to operate on the group
of packages in a single transaction and yum requires groups to be specified
in different ways when used in that way. Package groups are specified as
"@development-tools" and environment groups are "@^gnome-desktop-environment".
Use the "yum group list" command to see which category of group the group
you want to install falls into.'
# informational: requirements for nodes
requirements:
- yum
author:
- Ansible Core Team
- Seth Vidal
- Eduard Snesarev (@verm666)
- Berend De Schouwer (@berenddeschouwer)
- Abhijeet Kasurde (@Akasurde)
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: install the latest version of Apache
yum:
name: httpd
state: latest
- name: ensure a list of packages installed
yum:
name: "{{ packages }}"
vars:
packages:
- httpd
- httpd-tools
- name: remove the Apache package
yum:
name: httpd
state: absent
- name: install the latest version of Apache from the testing repo
yum:
name: httpd
enablerepo: testing
state: present
- name: install one specific version of Apache
yum:
name: httpd-2.2.29-1.4.amzn1
state: present
- name: upgrade all packages
yum:
name: '*'
state: latest
- name: upgrade all packages, excluding kernel & foo related packages
yum:
name: '*'
state: latest
exclude: kernel*,foo*
- name: install the nginx rpm from a remote repo
yum:
name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install nginx rpm from a local file
yum:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install the 'Development tools' package group
yum:
name: "@Development tools"
state: present
- name: install the 'Gnome desktop' environment group
yum:
name: "@^gnome-desktop-environment"
state: present
- name: List ansible packages and register result to print with debug later.
yum:
list: ansible
register: result
- name: Install package with multiple repos enabled
yum:
name: sos
enablerepo: "epel,ol7_latest"
- name: Install package with multiple repos disabled
yum:
name: sos
disablerepo: "epel,ol7_latest"
- name: Install a list of packages
yum:
name:
- nginx
- postgresql
- postgresql-server
state: present
- name: Download the nginx package but do not install it
yum:
name:
- nginx
state: latest
download_only: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
import os
import re
import tempfile
try:
import rpm
HAS_RPM_PYTHON = True
except ImportError:
HAS_RPM_PYTHON = False
try:
import yum
HAS_YUM_PYTHON = True
except ImportError:
HAS_YUM_PYTHON = False
try:
from yum.misc import find_unfinished_transactions, find_ts_remaining
from rpmUtils.miscutils import splitFilename, compareEVR
transaction_helpers = True
except ImportError:
transaction_helpers = False
from contextlib import contextmanager
def_qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}"
rpmbin = None
BUFSIZE = 65536
class YumModule(YumDnf):
def __init__(self, module):
super(YumModule, self).__init__(module)
def fetch_rpm_from_url(self, spec):
package_name, dummy = os.path.splitext(str(spec.rsplit('/', 1)[1]))
package_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, prefix=package_name, suffix='.rpm', delete=False)
self.module.add_cleanup_file(package_file.name)
try:
rsp, info = fetch_url(self.module, spec)
if not rsp:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, info['msg']))
data = rsp.read(BUFSIZE)
while data:
package_file.write(data)
data = rsp.read(BUFSIZE)
package_file.close()
except Exception as e:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, to_native(e)))
return package_file.name
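    # --- Illustrative sketch (added): the chunked-download loop above,
    # isolated as a standalone helper. Assumes only a file-like 'source'
    # with .read(n) and a writable 'dest'; BUFSIZE is the module constant
    # defined above.
    @staticmethod
    def _sketch_copy_in_chunks(source, dest, bufsize=BUFSIZE):
        # read-and-write in fixed-size chunks so a large RPM never has to
        # fit in memory all at once
        data = source.read(bufsize)
        while data:
            dest.write(data)
            data = source.read(bufsize)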
def yum_base(self):
my = yum.YumBase()
my.preconf.debuglevel = 0
my.preconf.errorlevel = 0
my.preconf.plugins = True
my.preconf.enabled_plugins = self.enable_plugin
my.preconf.disabled_plugins = self.disable_plugin
if self.releasever:
my.preconf.releasever = self.releasever
if self.installroot != '/':
my.preconf.root = self.installroot
my.conf.installroot = self.installroot
if self.conf_file and os.path.exists(self.conf_file):
my.preconf.fn = self.conf_file
if os.geteuid() != 0:
if hasattr(my, 'setCacheDir'):
my.setCacheDir()
else:
cachedir = yum.misc.getCacheDir()
my.repos.setCacheDir(cachedir)
my.conf.cache = 0
if self.disable_excludes:
my.conf.disable_excludes = self.disable_excludes
return my
def po_to_envra(self, po):
if hasattr(po, 'ui_envra'):
return po.ui_envra
return '%s:%s-%s-%s.%s' % (po.epoch, po.name, po.version, po.release, po.arch)
def is_group_env_installed(self, name):
name_lower = name.lower()
my = self.yum_base()
if yum.__version_info__ >= (3, 4):
groups_list = my.doGroupLists(return_evgrps=True)
else:
groups_list = my.doGroupLists()
groups = groups_list[0]
for group in groups:
if name_lower.endswith(group.name.lower()) or name_lower.endswith(group.groupid.lower()):
return True
if yum.__version_info__ >= (3, 4):
envs = groups_list[2]
for env in envs:
if name_lower.endswith(env.name.lower()) or name_lower.endswith(env.environmentid.lower()):
return True
return False
def is_installed(self, repoq, pkgspec, qf=None, is_pkg=False):
if qf is None:
qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}\n"
if not repoq:
pkgs = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
e, m, _ = my.rpmdb.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs and not is_pkg:
pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return [self.po_to_envra(p) for p in pkgs]
else:
global rpmbin
if not rpmbin:
rpmbin = self.module.get_bin_path('rpm', required=True)
cmd = [rpmbin, '-q', '--qf', qf, pkgspec]
if self.installroot != '/':
cmd.extend(['--root', self.installroot])
            # force the C locale so the output we parse below is stable
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
if rc != 0 and 'is not installed' not in out:
self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err))
if 'is not installed' in out:
out = ''
pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()]
if not pkgs and not is_pkg:
cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec]
if self.installroot != '/':
cmd.extend(['--root', self.installroot])
rc2, out2, err2 = self.module.run_command(cmd, environ_update=lang_env)
else:
rc2, out2, err2 = (0, '', '')
if rc2 != 0 and 'no package provides' not in out2:
self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2))
if 'no package provides' in out2:
out2 = ''
pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()]
return pkgs
return []
def is_available(self, repoq, pkgspec, qf=def_qf):
if not repoq:
pkgs = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
e, m, _ = my.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs:
pkgs.extend(my.returnPackagesByDep(pkgspec))
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return [self.po_to_envra(p) for p in pkgs]
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, pkgspec]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return [p for p in out.split('\n') if p.strip()]
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return []
def is_update(self, repoq, pkgspec, qf=def_qf):
if not repoq:
pkgs = []
updates = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec)
if not pkgs:
e, m, _ = my.pkgSack.matchPackageNames([pkgspec])
pkgs = e + m
updates = my.doPackageLists(pkgnarrow='updates').updates
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
retpkgs = (pkg for pkg in pkgs if pkg in updates)
return set(self.po_to_envra(p) for p in retpkgs)
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return set(p for p in out.split('\n') if p.strip())
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return set()
def what_provides(self, repoq, req_spec, qf=def_qf):
if not repoq:
pkgs = []
try:
my = self.yum_base()
for rid in self.disablerepo:
my.repos.disableRepo(rid)
for rid in self.enablerepo:
my.repos.enableRepo(rid)
try:
pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
except Exception as e:
                    # If a repo with `repo_gpgcheck=1` is added and the repo GPG
                    # key was never accepted, querying this repo will throw an
                    # error: 'repomd.xml signature could not be verified'. In that
                    # situation we need to run `yum -y makecache`, which will accept
                    # the key, and then try again.
if 'repomd.xml signature could not be verified' in to_native(e):
self.module.run_command(self.yum_basecmd + ['makecache'])
pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec)
else:
raise
if not pkgs:
e, m, _ = my.pkgSack.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
e, m, _ = my.rpmdb.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
except Exception as e:
self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
return set(self.po_to_envra(p) for p in pkgs)
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
rc, out, err = self.module.run_command(cmd)
cmd = myrepoq + ["--qf", qf, req_spec]
rc2, out2, err2 = self.module.run_command(cmd)
if rc == 0 and rc2 == 0:
out += out2
pkgs = set([p for p in out.split('\n') if p.strip()])
if not pkgs:
pkgs = self.is_installed(repoq, req_spec, qf=qf)
return pkgs
else:
self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
return set()
def transaction_exists(self, pkglist):
conflicts = []
if not transaction_helpers:
return conflicts
        # first, we create a list of the package 'nvreas'
        # so we can compare the pieces later more easily
        # (a real list, not a generator: it is iterated once per
        # transaction step in the loop below)
        pkglist_nvreas = [splitFilename(pkg) for pkg in pkglist]
# next, we build the list of packages that are
# contained within an unfinished transaction
unfinished_transactions = find_unfinished_transactions()
for trans in unfinished_transactions:
steps = find_ts_remaining(trans)
for step in steps:
# the action is install/erase/etc., but we only
# care about the package spec contained in the step
(action, step_spec) = step
(n, v, r, e, a) = splitFilename(step_spec)
# and see if that spec is in the list of packages
# requested for installation/updating
for pkg in pkglist_nvreas:
                    # if the name and arch match, we're going to assume it is
                    # the same package and record a conflict
                    label = "%s-%s" % (n, a)
                    if n == pkg[0] and a == pkg[4]:
                        if label not in conflicts:
                            conflicts.append(label)
break
return conflicts
def local_envra(self, path):
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
fd = os.open(path, os.O_RDONLY)
try:
header = ts.hdrFromFdno(fd)
except rpm.error as e:
return None
finally:
os.close(fd)
return '%s:%s-%s-%s.%s' % (
header[rpm.RPMTAG_EPOCH] or '0',
header[rpm.RPMTAG_NAME],
header[rpm.RPMTAG_VERSION],
header[rpm.RPMTAG_RELEASE],
header[rpm.RPMTAG_ARCH]
)
@contextmanager
def set_env_proxy(self):
my = self.yum_base()
namepass = ""
proxy_url = ""
scheme = ["http", "https"]
old_proxy_env = [os.getenv("http_proxy"), os.getenv("https_proxy")]
try:
if my.conf.proxy:
if my.conf.proxy_username:
namepass = namepass + my.conf.proxy_username
proxy_url = my.conf.proxy
if my.conf.proxy_password:
namepass = namepass + ":" + my.conf.proxy_password
elif '@' in my.conf.proxy:
namepass = my.conf.proxy.split('@')[0].split('//')[-1]
proxy_url = my.conf.proxy.replace("{0}@".format(namepass), "")
if namepass:
namepass = namepass + '@'
for item in scheme:
os.environ[item + "_proxy"] = re.sub(
r"(http://)",
r"\1" + namepass, proxy_url
)
yield
except yum.Errors.YumBaseError:
raise
finally:
for item in scheme:
if os.getenv("{0}_proxy".format(item)):
del os.environ["{0}_proxy".format(item)]
if old_proxy_env[0]:
os.environ["http_proxy"] = old_proxy_env[0]
if old_proxy_env[1]:
os.environ["https_proxy"] = old_proxy_env[1]
def pkg_to_dict(self, pkgstr):
if pkgstr.strip():
n, e, v, r, a, repo = pkgstr.split('|')
else:
return {'error_parsing': pkgstr}
d = {
'name': n,
'arch': a,
'epoch': e,
'release': r,
'version': v,
'repo': repo,
'envra': '%s:%s-%s-%s.%s' % (e, n, v, r, a)
}
if repo == 'installed':
d['yumstate'] = 'installed'
else:
d['yumstate'] = 'available'
return d
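    # --- Illustrative sketch (added; sample line): pkg_to_dict() applied to
    # one '|'-separated record in the qf format used by list_stuff() below.
    def _sketch_pkg_to_dict(self):
        return self.pkg_to_dict('httpd|0|2.4.6|93.el7|x86_64|installed')
        # -> {'name': 'httpd', 'arch': 'x86_64', 'epoch': '0',
        #     'release': '93.el7', 'version': '2.4.6', 'repo': 'installed',
        #     'envra': '0:httpd-2.4.6-93.el7.x86_64', 'yumstate': 'installed'}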
def repolist(self, repoq, qf="%{repoid}"):
cmd = repoq + ["--qf", qf, "-a"]
rc, out, _ = self.module.run_command(cmd)
if rc == 0:
return set(p for p in out.split('\n') if p.strip())
else:
return []
def list_stuff(self, repoquerybin, stuff):
qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n"
repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
if self.disablerepo:
repoq.extend(['--disablerepo', ','.join(self.disablerepo)])
if self.enablerepo:
repoq.extend(['--enablerepo', ','.join(self.enablerepo)])
if self.installroot != '/':
repoq.extend(['--installroot', self.installroot])
if self.conf_file and os.path.exists(self.conf_file):
repoq += ['-c', self.conf_file]
if stuff == 'installed':
return [self.pkg_to_dict(p) for p in sorted(self.is_installed(repoq, '-a', qf=is_installed_qf)) if p.strip()]
if stuff == 'updates':
return [self.pkg_to_dict(p) for p in sorted(self.is_update(repoq, '-a', qf=qf)) if p.strip()]
if stuff == 'available':
return [self.pkg_to_dict(p) for p in sorted(self.is_available(repoq, '-a', qf=qf)) if p.strip()]
if stuff == 'repos':
return [dict(repoid=name, state='enabled') for name in sorted(self.repolist(repoq)) if name.strip()]
return [
self.pkg_to_dict(p) for p in
sorted(self.is_installed(repoq, stuff, qf=is_installed_qf) + self.is_available(repoq, stuff, qf=qf))
if p.strip()
]
def exec_install(self, items, action, pkgs, res):
cmd = self.yum_basecmd + [action] + pkgs
if self.module.check_mode:
self.module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
if rc == 1:
for spec in items:
if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
err = 'Package at %s could not be installed' % spec
self.module.fail_json(changed=False, msg=err, rc=rc)
res['rc'] = rc
res['results'].append(out)
res['msg'] += err
res['changed'] = True
if ('Nothing to do' in out and rc == 0) or ('does not have any packages' in err):
res['changed'] = False
if rc != 0:
res['changed'] = False
self.module.fail_json(**res)
# yum can still exit with rc=0 even if some post scripts didn't execute
if 'No space left on device' in (out or err):
res['changed'] = False
res['msg'] = 'No space left on device'
self.module.fail_json(**res)
return res
def install(self, items, repoq):
pkgs = []
downgrade_pkgs = []
res = {}
res['results'] = []
res['msg'] = ''
res['rc'] = 0
res['changed'] = False
for spec in items:
pkg = None
downgrade_candidate = False
if spec.endswith('.rpm'):
if '://' not in spec and not os.path.exists(spec):
res['msg'] += "No RPM file matching '%s' found on system" % spec
res['results'].append("No RPM file matching '%s' found on system" % spec)
res['rc'] = 127
self.module.fail_json(**res)
if '://' in spec:
with self.set_env_proxy():
package = self.fetch_rpm_from_url(spec)
else:
package = spec
envra = self.local_envra(package)
if envra is None:
self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
installed_pkgs = self.is_installed(repoq, envra)
if installed_pkgs:
res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], package))
continue
(name, ver, rel, epoch, arch) = splitFilename(envra)
installed_pkgs = self.is_installed(repoq, name)
if len(installed_pkgs) == 2:
(cur_name0, cur_ver0, cur_rel0, cur_epoch0, cur_arch0) = splitFilename(installed_pkgs[0])
(cur_name1, cur_ver1, cur_rel1, cur_epoch1, cur_arch1) = splitFilename(installed_pkgs[1])
cur_epoch0 = cur_epoch0 or '0'
cur_epoch1 = cur_epoch1 or '0'
compare = compareEVR((cur_epoch0, cur_ver0, cur_rel0), (cur_epoch1, cur_ver1, cur_rel1))
if compare == 0 and cur_arch0 != cur_arch1:
for installed_pkg in installed_pkgs:
if installed_pkg.endswith(arch):
installed_pkgs = [installed_pkg]
if len(installed_pkgs) == 1:
installed_pkg = installed_pkgs[0]
(cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(installed_pkg)
cur_epoch = cur_epoch or '0'
compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
if compare > 0 and self.allow_downgrade:
downgrade_candidate = True
elif compare >= 0:
continue
pkg = package
elif spec.startswith('@'):
if self.is_group_env_installed(spec):
continue
pkg = spec
else:
if not set(['*', '?']).intersection(set(spec)):
installed_pkgs = self.is_installed(repoq, spec, is_pkg=True)
if installed_pkgs:
res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
continue
# look up what pkgs provide this
pkglist = self.what_provides(repoq, spec)
if not pkglist:
res['msg'] += "No package matching '%s' found available, installed or updated" % spec
res['results'].append("No package matching '%s' found available, installed or updated" % spec)
res['rc'] = 126 # Ensure the task fails in with-loop
self.module.fail_json(**res)
# if any of the packages are involved in a transaction, fail now
# so that we don't hang on the yum operation later
conflicts = self.transaction_exists(pkglist)
if conflicts:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
res['rc'] = 125
self.module.fail_json(**res)
found = False
for this in pkglist:
if self.is_installed(repoq, this, is_pkg=True):
found = True
res['results'].append('%s providing %s is already installed' % (this, spec))
break
# so we check one more time. This really only works for pkgname - not for file provides or virt provides
# but virt provides should be all caught in what_provides on its own.
# highly irritating
if not found:
if self.is_installed(repoq, spec):
found = True
res['results'].append('package providing %s is already installed' % (spec))
if found:
continue
# Downgrade - The yum install command will only install or upgrade to a spec version, it will
# not install an older version of an RPM even if specified by the install spec. So we need to
# determine if this is a downgrade, and then use the yum downgrade command to install the RPM.
if self.allow_downgrade:
for package in pkglist:
# Get the NEVRA of the requested package using pkglist instead of spec because pkglist
# contains consistently-formatted package names returned by yum, rather than user input
# that is often not parsed correctly by splitFilename().
(name, ver, rel, epoch, arch) = splitFilename(package)
# Check if any version of the requested package is installed
inst_pkgs = self.is_installed(repoq, name, is_pkg=True)
if inst_pkgs:
(cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(inst_pkgs[0])
compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
if compare > 0:
downgrade_candidate = True
else:
downgrade_candidate = False
break
                # If the package needs to be installed/upgraded/downgraded, pass the spec
                # through; we could get here even if nothing provides it, but in that case
                # yum itself will report the failure when the transaction runs
pkg = spec
if downgrade_candidate and self.allow_downgrade:
downgrade_pkgs.append(pkg)
else:
pkgs.append(pkg)
if downgrade_pkgs:
res = self.exec_install(items, 'downgrade', downgrade_pkgs, res)
if pkgs:
res = self.exec_install(items, 'install', pkgs, res)
return res
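    # --- Illustrative sketch (added; sample tuples): how compareEVR drives
    # the downgrade decision in install() above. compareEVR comes from
    # rpmUtils and is only defined when the transaction_helpers import at
    # the top of the file succeeded.
    @staticmethod
    def _sketch_downgrade_check():
        installed = ('0', '2.4.6', '93.el7')   # (epoch, version, release)
        requested = ('0', '2.4.6', '80.el7')
        # > 0 means the installed EVR is newer, so only allow_downgrade
        # lets the requested (older) package through.
        return compareEVR(installed, requested) > 0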
| true
| true
|
790c27a56ffea18d938238326b7e66906fce3238
| 4,686
|
py
|
Python
|
experiments/steven/online-vae/pick_and_place/state_exp.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/steven/online-vae/pick_and_place/state_exp.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/steven/online-vae/pick_and_place/state_exp.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
import rlkit.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import init_sawyer_camera_v1
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.grill.launcher import grill_her_td3_full_experiment
import rlkit.torch.vae.vae_schedules as vae_schedules
from multiworld.envs.mujoco.sawyer_xyz.sawyer_pick_and_place \
import SawyerPickAndPlaceEnv, SawyerPickAndPlaceEnvYZ
from rlkit.envs.goal_generation.pickup_goal_dataset import \
generate_vae_dataset, get_image_presampled_goals_from_vae_env
from multiworld.envs.mujoco.cameras import \
sawyer_pick_and_place_camera, sawyer_pick_and_place_camera_slanted_angle
if __name__ == "__main__":
num_images = 1
variant = dict(
imsize=84,
double_algo=False,
env_id="SawyerPickupEnv-v0",
grill_variant=dict(
save_video=True,
save_video_period=50,
presample_goals=True,
generate_goal_dataset_fctn=get_image_presampled_goals_from_vae_env,
goal_generation_kwargs=dict(
num_presampled_goals=1000,
),
do_state_exp=True,
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=505,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
min_num_steps_before_training=4000,
batch_size=128,
max_path_length=50,
discount=0.99,
num_updates_per_env_step=4,
collection_mode='online-parallel',
reward_scale=100,
),
td3_kwargs=dict(
tau=1e-2,
),
her_kwargs=dict(),
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
replay_buffer_kwargs=dict(
max_size=int(1e6),
fraction_goals_are_rollout_goals=0.0,
fraction_resampled_goals_are_env_goals=0.5,
),
algorithm='GRILL-HER-TD3',
normalize=False,
render=False,
exploration_noise=0.3,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
),
train_vae_variant=dict(
generate_vae_data_fctn=generate_vae_dataset,
dump_skew_debug_plots=False,
representation_size=16,
beta=0.5,
num_epochs=0,
generate_vae_dataset_kwargs=dict(
N=50,
oracle_dataset=True,
use_cached=True,
num_channels=3*num_images,
),
vae_kwargs=dict(
input_channels=3*num_images,
),
algo_kwargs=dict(
train_data_workers=4,
do_scatterplot=False,
lr=1e-3,
),
#beta_schedule_kwargs=dict(
# x_values=[0, 100, 200, 500],
# y_values=[0, 0, 5, 5],
#),
save_period=5,
),
)
search_space = {
'grill_variant.training_mode': ['train'],
'grill_variant.replay_kwargs.fraction_goals_are_rollout_goals': [0.0],
'grill_variant.algo_kwargs.base_kwargs.num_updates_per_env_step': [4],
'grill_variant.exploration_noise': [.3, .5],
'env_kwargs.random_init': [False],
'env_kwargs.action_scale': [.02],
'init_camera': [
sawyer_pick_and_place_camera,
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 4
mode = 'ec2'
exp_prefix = 'pickup-true-state-exp-rig-paper'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
grill_her_td3_full_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
# trial_dir_suffix='n1000-{}--zoomed-{}'.format(n1000, zoomed),
snapshot_gap=200,
snapshot_mode='gap_and_last',
num_exps_per_instance=2,
)
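# --- Illustrative sketch (added; a minimal re-implementation, not rlkit's
# own API): how a dotted key in search_space maps onto the nested dicts of
# `variant` when the sweeper builds each hyperparameter combination.
def _set_dotted(d, dotted_key, value):
    # walk/create intermediate dicts for every segment but the last,
    # then assign the leaf value
    keys = dotted_key.split('.')
    node = d
    for k in keys[:-1]:
        node = node.setdefault(k, {})
    node[keys[-1]] = value
# e.g. _set_dotted(variant, 'grill_variant.exploration_noise', 0.5) sets
# variant['grill_variant']['exploration_noise'] = 0.5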
| 35.5
| 80
| 0.568289
|
import rlkit.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import init_sawyer_camera_v1
from multiworld.envs.mujoco.cameras import sawyer_pick_and_place_camera
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.grill.launcher import grill_her_td3_full_experiment
import rlkit.torch.vae.vae_schedules as vae_schedules
from multiworld.envs.mujoco.sawyer_xyz.sawyer_pick_and_place \
import SawyerPickAndPlaceEnv, SawyerPickAndPlaceEnvYZ
from rlkit.envs.goal_generation.pickup_goal_dataset import \
generate_vae_dataset, get_image_presampled_goals_from_vae_env
from multiworld.envs.mujoco.cameras import \
sawyer_pick_and_place_camera, sawyer_pick_and_place_camera_slanted_angle
if __name__ == "__main__":
num_images = 1
variant = dict(
imsize=84,
double_algo=False,
env_id="SawyerPickupEnv-v0",
grill_variant=dict(
save_video=True,
save_video_period=50,
presample_goals=True,
generate_goal_dataset_fctn=get_image_presampled_goals_from_vae_env,
goal_generation_kwargs=dict(
num_presampled_goals=1000,
),
do_state_exp=True,
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=505,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
min_num_steps_before_training=4000,
batch_size=128,
max_path_length=50,
discount=0.99,
num_updates_per_env_step=4,
collection_mode='online-parallel',
reward_scale=100,
),
td3_kwargs=dict(
tau=1e-2,
),
her_kwargs=dict(),
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
replay_buffer_kwargs=dict(
max_size=int(1e6),
fraction_goals_are_rollout_goals=0.0,
fraction_resampled_goals_are_env_goals=0.5,
),
algorithm='GRILL-HER-TD3',
normalize=False,
render=False,
exploration_noise=0.3,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
),
train_vae_variant=dict(
generate_vae_data_fctn=generate_vae_dataset,
dump_skew_debug_plots=False,
representation_size=16,
beta=0.5,
num_epochs=0,
generate_vae_dataset_kwargs=dict(
N=50,
oracle_dataset=True,
use_cached=True,
num_channels=3*num_images,
),
vae_kwargs=dict(
input_channels=3*num_images,
),
algo_kwargs=dict(
train_data_workers=4,
do_scatterplot=False,
lr=1e-3,
),
save_period=5,
),
)
search_space = {
'grill_variant.training_mode': ['train'],
'grill_variant.replay_kwargs.fraction_goals_are_rollout_goals': [0.0],
'grill_variant.algo_kwargs.base_kwargs.num_updates_per_env_step': [4],
'grill_variant.exploration_noise': [.3, .5],
'env_kwargs.random_init': [False],
'env_kwargs.action_scale': [.02],
'init_camera': [
sawyer_pick_and_place_camera,
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 4
mode = 'ec2'
exp_prefix = 'pickup-true-state-exp-rig-paper'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
grill_her_td3_full_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
snapshot_gap=200,
snapshot_mode='gap_and_last',
num_exps_per_instance=2,
)
| true
| true
|
790c27e45987e197ed450469d6e3791ee48c8be2
| 11,074
|
py
|
Python
|
uboot/tools/microcode-tool.py
|
bingchunjin/1806_SDK
|
d5ed0258fc22f60e00ec025b802d175f33da6e41
|
[
"MIT"
] | 31
|
2018-01-16T17:11:44.000Z
|
2022-03-16T13:51:24.000Z
|
uboot/tools/microcode-tool.py
|
bingchunjin/1806_SDK
|
d5ed0258fc22f60e00ec025b802d175f33da6e41
|
[
"MIT"
] | 4
|
2016-08-30T11:30:25.000Z
|
2020-12-27T09:58:07.000Z
|
uboot/tools/microcode-tool.py
|
bingchunjin/1806_SDK
|
d5ed0258fc22f60e00ec025b802d175f33da6e41
|
[
"MIT"
] | 30
|
2018-05-02T08:43:27.000Z
|
2022-01-23T03:25:54.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2014 Google, Inc
#
# SPDX-License-Identifier: GPL-2.0+
#
# Intel microcode update tool
from optparse import OptionParser
import os
import re
import struct
import sys
MICROCODE_DIR = 'arch/x86/dts/microcode'
class Microcode:
"""Holds information about the microcode for a particular model of CPU.
Attributes:
name: Name of the CPU this microcode is for, including any version
information (e.g. 'm12206a7_00000029')
model: Model code string (this is cpuid(1).eax, e.g. '206a7')
words: List of hex words containing the microcode. The first 16 words
are the public header.
"""
def __init__(self, name, data):
self.name = name
# Convert data into a list of hex words
self.words = []
for value in ''.join(data).split(','):
hexval = value.strip()
if hexval:
self.words.append(int(hexval, 0))
    # The model is in the 4th hex word
self.model = '%x' % self.words[3]
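# Minimal usage sketch (hypothetical hex values, for illustration only):
def _example_microcode():
    """Show how a comma-separated data fragment maps to words and model."""
    mcode = Microcode('m12206a7_00000029',
                      ['0x00000001, 0x00000029,',
                       '0x04072011, 0x000206a7,'])
    # words == [0x1, 0x29, 0x04072011, 0x206a7]; cpuid(1).eax is word 3,
    # so the derived model string is '206a7'.
    assert mcode.model == '206a7'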
def ParseFile(fname):
"""Parse a micrcode.dat file and return the component parts
Args:
fname: Filename to parse
Returns:
3-Tuple:
date: String containing date from the file's header
license_text: List of text lines for the license file
microcodes: List of Microcode objects from the file
"""
re_date = re.compile('/\* *(.* [0-9]{4}) *\*/$')
re_license = re.compile('/[^-*+] *(.*)$')
re_name = re.compile('/\* *(.*)\.inc *\*/', re.IGNORECASE)
microcodes = {}
license_text = []
date = ''
data = []
name = None
with open(fname) as fd:
for line in fd:
line = line.rstrip()
m_date = re_date.match(line)
m_license = re_license.match(line)
m_name = re_name.match(line)
if m_name:
if name:
microcodes[name] = Microcode(name, data)
name = m_name.group(1).lower()
data = []
elif m_license:
license_text.append(m_license.group(1))
elif m_date:
date = m_date.group(1)
else:
data.append(line)
if name:
microcodes[name] = Microcode(name, data)
return date, license_text, microcodes
def ParseHeaderFiles(fname_list):
"""Parse a list of header files and return the component parts
Args:
fname_list: List of files to parse
Returns:
date: String containing date from the file's header
license_text: List of text lines for the license file
microcodes: List of Microcode objects from the file
"""
microcodes = {}
license_text = []
date = ''
name = None
for fname in fname_list:
name = os.path.basename(fname).lower()
name = os.path.splitext(name)[0]
data = []
with open(fname) as fd:
license_start = False
license_end = False
for line in fd:
line = line.rstrip()
if len(line) >= 2:
if line[0] == '/' and line[1] == '*':
license_start = True
continue
if line[0] == '*' and line[1] == '/':
license_end = True
continue
if license_start and not license_end:
# Ignore blank line
if len(line) > 0:
license_text.append(line)
continue
# Omit anything after the last comma
words = line.split(',')[:-1]
data += [word + ',' for word in words]
microcodes[name] = Microcode(name, data)
return date, license_text, microcodes
def List(date, microcodes, model):
"""List the available microcode chunks
Args:
date: Date of the microcode file
microcodes: Dict of Microcode objects indexed by name
model: Model string to search for, or None
"""
print 'Date: %s' % date
if model:
mcode_list, tried = FindMicrocode(microcodes, model.lower())
print 'Matching models %s:' % (', '.join(tried))
else:
print 'All models:'
mcode_list = [microcodes[m] for m in microcodes.keys()]
for mcode in mcode_list:
print '%-20s: model %s' % (mcode.name, mcode.model)
def FindMicrocode(microcodes, model):
"""Find all the microcode chunks which match the given model.
This model is something like 306a9 (the value returned in eax from
cpuid(1) when running on Intel CPUs). But we allow a partial match,
    omitting the last one or two characters to allow many families to have the
same microcode.
If the model name is ambiguous we return a list of matches.
Args:
microcodes: Dict of Microcode objects indexed by name
model: String containing model name to find
Returns:
Tuple:
List of matching Microcode objects
List of abbreviations we tried
"""
# Allow a full name to be used
mcode = microcodes.get(model)
if mcode:
return [mcode], []
tried = []
found = []
for i in range(3):
abbrev = model[:-i] if i else model
tried.append(abbrev)
for mcode in microcodes.values():
if mcode.model.startswith(abbrev):
found.append(mcode)
if found:
break
return found, tried
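# Usage sketch (hypothetical model codes, for illustration only):
def _example_find_microcode():
    """Demonstrate partial-model matching against two fake entries."""
    fake = {
        'm306a9': Microcode('m306a9', ['0, 0, 0, 0x306a9,']),
        'm306a4': Microcode('m306a4', ['0, 0, 0, 0x306a4,']),
    }
    found, tried = FindMicrocode(fake, '306a')
    # Both models start with '306a', so the search stops after the first
    # abbreviation; the shorter prefixes '306' and '30' would only be
    # tried if nothing had matched yet.
    assert sorted(m.name for m in found) == ['m306a4', 'm306a9']
    assert tried == ['306a']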
def CreateFile(date, license_text, mcodes, outfile):
"""Create a microcode file in U-Boot's .dtsi format
Args:
date: String containing date of original microcode file
license: List of text lines for the license file
mcodes: Microcode objects to write (normally only 1)
outfile: Filename to write to ('-' for stdout)
"""
out = '''/*%s
* ---
* This is a device tree fragment. Use #include to add these properties to a
* node.
*
* Date: %s
*/
compatible = "intel,microcode";
intel,header-version = <%d>;
intel,update-revision = <%#x>;
intel,date-code = <%#x>;
intel,processor-signature = <%#x>;
intel,checksum = <%#x>;
intel,loader-revision = <%d>;
intel,processor-flags = <%#x>;
/* The first 48-bytes are the public header which repeats the above data */
data = <%s
\t>;'''
words = ''
add_comments = len(mcodes) > 1
for mcode in mcodes:
if add_comments:
words += '\n/* %s */' % mcode.name
for i in range(len(mcode.words)):
if not (i & 3):
words += '\n'
val = mcode.words[i]
# Change each word so it will be little-endian in the FDT
# This data is needed before RAM is available on some platforms so
# we cannot do an endianness swap on boot.
val = struct.unpack("<I", struct.pack(">I", val))[0]
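            # e.g. 0x12345678 packs big-endian as 12 34 56 78 and unpacks
            # little-endian as 0x78563412 (illustrative values).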
words += '\t%#010x' % val
# Use the first microcode for the headers
mcode = mcodes[0]
# Take care to avoid adding a space before a tab
text = ''
for line in license_text:
if line[0] == '\t':
text += '\n *' + line
else:
text += '\n * ' + line
args = [text, date]
args += [mcode.words[i] for i in range(7)]
args.append(words)
if outfile == '-':
print out % tuple(args)
else:
if not outfile:
if not os.path.exists(MICROCODE_DIR):
print >> sys.stderr, "Creating directory '%s'" % MICROCODE_DIR
os.makedirs(MICROCODE_DIR)
outfile = os.path.join(MICROCODE_DIR, mcode.name + '.dtsi')
print >> sys.stderr, "Writing microcode for '%s' to '%s'" % (
', '.join([mcode.name for mcode in mcodes]), outfile)
with open(outfile, 'w') as fd:
print >> fd, out % tuple(args)
def MicrocodeTool():
"""Run the microcode tool"""
commands = 'create,license,list'.split(',')
parser = OptionParser()
parser.add_option('-d', '--mcfile', type='string', action='store',
help='Name of microcode.dat file')
parser.add_option('-H', '--headerfile', type='string', action='append',
help='Name of .h file containing microcode')
parser.add_option('-m', '--model', type='string', action='store',
help="Model name to extract ('all' for all)")
parser.add_option('-M', '--multiple', type='string', action='store',
help="Allow output of multiple models")
parser.add_option('-o', '--outfile', type='string', action='store',
help='Filename to use for output (- for stdout), default is'
' %s/<name>.dtsi' % MICROCODE_DIR)
parser.usage += """ command
Process an Intel microcode file (use -h for help). Commands:
create Create microcode .dtsi file for a model
list List available models in microcode file
license Print the license
Typical usage:
./tools/microcode-tool -d microcode.dat -m 306a create
This will find the appropriate file and write it to %s.""" % MICROCODE_DIR
(options, args) = parser.parse_args()
if not args:
parser.error('Please specify a command')
cmd = args[0]
if cmd not in commands:
parser.error("Unknown command '%s'" % cmd)
    if options.mcfile and options.headerfile:
        parser.error("You must specify either header files or a microcode file, not both")
if options.headerfile:
date, license_text, microcodes = ParseHeaderFiles(options.headerfile)
elif options.mcfile:
date, license_text, microcodes = ParseFile(options.mcfile)
else:
parser.error('You must specify a microcode file (or header files)')
if cmd == 'list':
List(date, microcodes, options.model)
elif cmd == 'license':
print '\n'.join(license_text)
elif cmd == 'create':
if not options.model:
parser.error('You must specify a model to create')
model = options.model.lower()
if options.model == 'all':
options.multiple = True
mcode_list = microcodes.values()
tried = []
else:
mcode_list, tried = FindMicrocode(microcodes, model)
if not mcode_list:
parser.error("Unknown model '%s' (%s) - try 'list' to list" %
(model, ', '.join(tried)))
if not options.multiple and len(mcode_list) > 1:
parser.error("Ambiguous model '%s' (%s) matched %s - try 'list' "
"to list or specify a particular file" %
(model, ', '.join(tried),
', '.join([m.name for m in mcode_list])))
CreateFile(date, license_text, mcode_list, options.outfile)
else:
parser.error("Unknown command '%s'" % cmd)
if __name__ == "__main__":
MicrocodeTool()
| 34.823899
| 90
| 0.564475
|
from optparse import OptionParser
import os
import re
import struct
import sys
MICROCODE_DIR = 'arch/x86/dts/microcode'
class Microcode:
"""Holds information about the microcode for a particular model of CPU.
Attributes:
name: Name of the CPU this microcode is for, including any version
information (e.g. 'm12206a7_00000029')
model: Model code string (this is cpuid(1).eax, e.g. '206a7')
words: List of hex words containing the microcode. The first 16 words
are the public header.
"""
def __init__(self, name, data):
self.name = name
self.words = []
for value in ''.join(data).split(','):
hexval = value.strip()
if hexval:
self.words.append(int(hexval, 0))
self.model = '%x' % self.words[3]
def ParseFile(fname):
"""Parse a micrcode.dat file and return the component parts
Args:
fname: Filename to parse
Returns:
3-Tuple:
date: String containing date from the file's header
license_text: List of text lines for the license file
microcodes: List of Microcode objects from the file
"""
re_date = re.compile('/\* *(.* [0-9]{4}) *\*/$')
re_license = re.compile('/[^-*+] *(.*)$')
re_name = re.compile('/\* *(.*)\.inc *\*/', re.IGNORECASE)
microcodes = {}
license_text = []
date = ''
data = []
name = None
with open(fname) as fd:
for line in fd:
line = line.rstrip()
m_date = re_date.match(line)
m_license = re_license.match(line)
m_name = re_name.match(line)
if m_name:
if name:
microcodes[name] = Microcode(name, data)
name = m_name.group(1).lower()
data = []
elif m_license:
license_text.append(m_license.group(1))
elif m_date:
date = m_date.group(1)
else:
data.append(line)
if name:
microcodes[name] = Microcode(name, data)
return date, license_text, microcodes
def ParseHeaderFiles(fname_list):
"""Parse a list of header files and return the component parts
Args:
fname_list: List of files to parse
Returns:
date: String containing date from the file's header
license_text: List of text lines for the license file
microcodes: List of Microcode objects from the file
"""
microcodes = {}
license_text = []
date = ''
name = None
for fname in fname_list:
name = os.path.basename(fname).lower()
name = os.path.splitext(name)[0]
data = []
with open(fname) as fd:
license_start = False
license_end = False
for line in fd:
line = line.rstrip()
if len(line) >= 2:
if line[0] == '/' and line[1] == '*':
license_start = True
continue
if line[0] == '*' and line[1] == '/':
license_end = True
continue
if license_start and not license_end:
if len(line) > 0:
license_text.append(line)
continue
words = line.split(',')[:-1]
data += [word + ',' for word in words]
microcodes[name] = Microcode(name, data)
return date, license_text, microcodes
def List(date, microcodes, model):
"""List the available microcode chunks
Args:
date: Date of the microcode file
microcodes: Dict of Microcode objects indexed by name
model: Model string to search for, or None
"""
print 'Date: %s' % date
if model:
mcode_list, tried = FindMicrocode(microcodes, model.lower())
print 'Matching models %s:' % (', '.join(tried))
else:
print 'All models:'
mcode_list = [microcodes[m] for m in microcodes.keys()]
for mcode in mcode_list:
print '%-20s: model %s' % (mcode.name, mcode.model)
def FindMicrocode(microcodes, model):
"""Find all the microcode chunks which match the given model.
This model is something like 306a9 (the value returned in eax from
cpuid(1) when running on Intel CPUs). But we allow a partial match,
    omitting the last one or two characters to allow many families to have the
same microcode.
If the model name is ambiguous we return a list of matches.
Args:
microcodes: Dict of Microcode objects indexed by name
model: String containing model name to find
Returns:
Tuple:
List of matching Microcode objects
List of abbreviations we tried
"""
mcode = microcodes.get(model)
if mcode:
return [mcode], []
tried = []
found = []
for i in range(3):
abbrev = model[:-i] if i else model
tried.append(abbrev)
for mcode in microcodes.values():
if mcode.model.startswith(abbrev):
found.append(mcode)
if found:
break
return found, tried
def CreateFile(date, license_text, mcodes, outfile):
"""Create a microcode file in U-Boot's .dtsi format
Args:
date: String containing date of original microcode file
license: List of text lines for the license file
mcodes: Microcode objects to write (normally only 1)
outfile: Filename to write to ('-' for stdout)
"""
out = '''/*%s
* ---
* This is a device tree fragment. Use #include to add these properties to a
* node.
*
* Date: %s
*/
compatible = "intel,microcode";
intel,header-version = <%d>;
intel,update-revision = <%#x>;
intel,date-code = <%#x>;
intel,processor-signature = <%#x>;
intel,checksum = <%#x>;
intel,loader-revision = <%d>;
intel,processor-flags = <%#x>;
/* The first 48-bytes are the public header which repeats the above data */
data = <%s
\t>;'''
words = ''
add_comments = len(mcodes) > 1
for mcode in mcodes:
if add_comments:
words += '\n/* %s */' % mcode.name
for i in range(len(mcode.words)):
if not (i & 3):
words += '\n'
val = mcode.words[i]
# Change each word so it will be little-endian in the FDT
# This data is needed before RAM is available on some platforms so
# we cannot do an endianness swap on boot.
val = struct.unpack("<I", struct.pack(">I", val))[0]
            words += '\t%#010x' % val
# Use the first microcode for the headers
mcode = mcodes[0]
# Take care to avoid adding a space before a tab
text = ''
for line in license_text:
if line[0] == '\t':
text += '\n *' + line
else:
text += '\n * ' + line
args = [text, date]
args += [mcode.words[i] for i in range(7)]
args.append(words)
if outfile == '-':
print out % tuple(args)
else:
if not outfile:
if not os.path.exists(MICROCODE_DIR):
print >> sys.stderr, "Creating directory '%s'" % MICROCODE_DIR
os.makedirs(MICROCODE_DIR)
outfile = os.path.join(MICROCODE_DIR, mcode.name + '.dtsi')
print >> sys.stderr, "Writing microcode for '%s' to '%s'" % (
', '.join([mcode.name for mcode in mcodes]), outfile)
with open(outfile, 'w') as fd:
print >> fd, out % tuple(args)
def MicrocodeTool():
"""Run the microcode tool"""
commands = 'create,license,list'.split(',')
parser = OptionParser()
parser.add_option('-d', '--mcfile', type='string', action='store',
help='Name of microcode.dat file')
parser.add_option('-H', '--headerfile', type='string', action='append',
help='Name of .h file containing microcode')
parser.add_option('-m', '--model', type='string', action='store',
help="Model name to extract ('all' for all)")
parser.add_option('-M', '--multiple', type='string', action='store',
help="Allow output of multiple models")
parser.add_option('-o', '--outfile', type='string', action='store',
help='Filename to use for output (- for stdout), default is'
' %s/<name>.dtsi' % MICROCODE_DIR)
parser.usage += """ command
Process an Intel microcode file (use -h for help). Commands:
create Create microcode .dtsi file for a model
list List available models in microcode file
license Print the license
Typical usage:
./tools/microcode-tool -d microcode.dat -m 306a create
This will find the appropriate file and write it to %s.""" % MICROCODE_DIR
(options, args) = parser.parse_args()
if not args:
parser.error('Please specify a command')
cmd = args[0]
if cmd not in commands:
parser.error("Unknown command '%s'" % cmd)
    if options.mcfile and options.headerfile:
        parser.error("You must specify either header files or a microcode file, not both")
if options.headerfile:
date, license_text, microcodes = ParseHeaderFiles(options.headerfile)
elif options.mcfile:
date, license_text, microcodes = ParseFile(options.mcfile)
else:
parser.error('You must specify a microcode file (or header files)')
if cmd == 'list':
List(date, microcodes, options.model)
elif cmd == 'license':
print '\n'.join(license_text)
elif cmd == 'create':
if not options.model:
parser.error('You must specify a model to create')
model = options.model.lower()
if options.model == 'all':
options.multiple = True
mcode_list = microcodes.values()
tried = []
else:
mcode_list, tried = FindMicrocode(microcodes, model)
if not mcode_list:
parser.error("Unknown model '%s' (%s) - try 'list' to list" %
(model, ', '.join(tried)))
if not options.multiple and len(mcode_list) > 1:
parser.error("Ambiguous model '%s' (%s) matched %s - try 'list' "
"to list or specify a particular file" %
(model, ', '.join(tried),
', '.join([m.name for m in mcode_list])))
CreateFile(date, license_text, mcode_list, options.outfile)
else:
parser.error("Unknown command '%s'" % cmd)
if __name__ == "__main__":
MicrocodeTool()
| false
| true
|
790c28156c1f9e13379b6d5b4cc63496002c2adc
| 39,088
|
py
|
Python
|
skbuild/setuptools_wrap.py
|
vyasr/scikit-build
|
5a821eb137d253e0380b4e76a2a692fba27d16dc
|
[
"MIT"
] | null | null | null |
skbuild/setuptools_wrap.py
|
vyasr/scikit-build
|
5a821eb137d253e0380b4e76a2a692fba27d16dc
|
[
"MIT"
] | null | null | null |
skbuild/setuptools_wrap.py
|
vyasr/scikit-build
|
5a821eb137d253e0380b4e76a2a692fba27d16dc
|
[
"MIT"
] | null | null | null |
"""This module provides functionality for wrapping key infrastructure components
from distutils and setuptools.
"""
from __future__ import print_function
import argparse
import copy
import json
import os
import os.path
import platform
import stat
import sys
import warnings
from contextlib import contextmanager
# pylint: disable-next=wrong-import-order
from distutils.errors import DistutilsArgError, DistutilsError, DistutilsGetoptError
from glob import glob
from shutil import copyfile, copymode
# Must be imported before distutils
import setuptools
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
if sys.version_info >= (3, 3):
from shutil import which
else:
from .compat import which
from packaging.requirements import Requirement
from packaging.version import parse as parse_version
from setuptools.dist import Distribution as upstream_Distribution
from . import cmaker
from .command import (
bdist,
bdist_wheel,
build,
build_ext,
build_py,
clean,
egg_info,
generate_source_manifest,
install,
install_lib,
install_scripts,
sdist,
test,
)
from .constants import (
CMAKE_DEFAULT_EXECUTABLE,
CMAKE_INSTALL_DIR,
CMAKE_SPEC_FILE,
set_skbuild_plat_name,
skbuild_plat_name,
)
from .exceptions import SKBuildError, SKBuildGeneratorNotFoundError
from .utils import (
PythonModuleFinder,
mkdir_p,
parse_manifestin,
to_platform_path,
to_unix_path,
)
def create_skbuild_argparser():
"""Create and return a scikit-build argument parser."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--build-type", default="Release", metavar="", help="specify the CMake build type (e.g. Debug or Release)"
)
parser.add_argument("-G", "--generator", metavar="", help="specify the CMake build system generator")
parser.add_argument("-j", metavar="N", type=int, dest="jobs", help="allow N build jobs at once")
parser.add_argument("--cmake-executable", default=None, metavar="", help="specify the path to the cmake executable")
parser.add_argument(
"--install-target",
default=None,
metavar="",
help="specify the CMake target performing the install. " "If not provided, uses the target ``install``",
)
parser.add_argument(
"--skip-generator-test",
action="store_true",
help="skip generator test when a generator is explicitly selected using --generator",
)
return parser
def _is_cmake_configure_argument(arg):
"""Return True if ``arg`` is a relevant argument to pass to cmake when configuring a project."""
for cmake_arg in (
"-C", # initial-cache
"-D", # <var>[:<type>]=<value>
):
if arg.startswith(cmake_arg):
return True
return False
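# For example (illustrative): '-DMY_VAR:BOOL=TRUE' and '-Cinitial.cmake'
# are forwarded to the CMake configure step, while a setuptools flag such
# as '--hide-listing' is not.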
def parse_skbuild_args(args, cmake_args, build_tool_args):
"""
Parse arguments in the scikit-build argument set. Convert specified
arguments to proper format and append to cmake_args and build_tool_args.
Returns the tuple ``(remaining arguments, cmake executable, skip_generator_test)``.
"""
parser = create_skbuild_argparser()
# Consider CMake arguments passed as global setuptools options
cmake_args.extend([arg for arg in args if _is_cmake_configure_argument(arg)])
# ... and remove them from the list
args = [arg for arg in args if not _is_cmake_configure_argument(arg)]
namespace, remaining_args = parser.parse_known_args(args)
# Construct CMake argument list
cmake_args.append("-DCMAKE_BUILD_TYPE:STRING=" + namespace.build_type)
if namespace.generator is not None:
cmake_args.extend(["-G", namespace.generator])
# Construct build tool argument list
build_tool_args.extend(["--config", namespace.build_type])
if namespace.jobs is not None:
build_tool_args.extend(["-j", str(namespace.jobs)])
if namespace.install_target is not None:
build_tool_args.extend(["--install-target", namespace.install_target])
if namespace.generator is None and namespace.skip_generator_test is True:
sys.exit("ERROR: Specifying --skip-generator-test requires --generator to also be specified.")
return remaining_args, namespace.cmake_executable, namespace.skip_generator_test
def parse_args():
"""This function parses the command-line arguments ``sys.argv`` and returns
the tuple ``(setuptools_args, cmake_executable, skip_generator_test, cmake_args, build_tool_args)``
where each ``*_args`` element corresponds to a set of arguments separated by ``--``."""
dutils = []
cmake = []
make = []
argsets = [dutils, cmake, make]
i = 0
separator = "--"
for arg in sys.argv:
if arg == separator:
i += 1
if i >= len(argsets):
sys.exit(
'ERROR: Too many "{}" separators provided '
"(expected at most {}).".format(separator, len(argsets) - 1)
)
else:
argsets[i].append(arg)
dutils, cmake_executable, skip_generator_test = parse_skbuild_args(dutils, cmake, make)
return dutils, cmake_executable, skip_generator_test, cmake, make
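# Example (hypothetical command line, for illustration only):
#   python setup.py bdist_wheel -- -DSOME_FLAG:BOOL=ON -- -j4
# splits sys.argv at each "--": the setuptools set gets
# ['setup.py', 'bdist_wheel'], the CMake set gets
# ['-DSOME_FLAG:BOOL=ON'] and the build-tool set gets ['-j4'], before
# parse_skbuild_args() post-processes each set.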
@contextmanager
def _capture_output():
oldout, olderr = sys.stdout, sys.stderr
try:
out = [StringIO(), StringIO()]
sys.stdout, sys.stderr = out
yield out
finally:
sys.stdout, sys.stderr = oldout, olderr
out[0] = out[0].getvalue()
out[1] = out[1].getvalue()
def _parse_setuptools_arguments(setup_attrs):
"""This function instantiates a Distribution object and
parses the command line arguments.
It returns the tuple ``(display_only, help_commands, commands, hide_listing, force_cmake, skip_cmake, plat_name)``
where
- display_only is a boolean indicating if an argument like '--help',
'--help-commands' or '--author' was passed.
- help_commands is a boolean indicating if argument '--help-commands'
was passed.
- commands contains the list of commands that were passed.
- hide_listing is a boolean indicating if the list of files being included
in the distribution is displayed or not.
- force_cmake a boolean indicating that CMake should always be executed.
- skip_cmake is a boolean indicating if the execution of CMake should
explicitly be skipped.
- plat_name is a string identifying the platform name to embed in generated
filenames. It defaults to :func:`skbuild.constants.skbuild_plat_name()`.
- build_ext_inplace is a boolean indicating if ``build_ext`` command was
specified along with the --inplace argument.
    Otherwise it raises a DistutilsArgError exception if there is any error
    on the command line, and a DistutilsGetoptError if there is any error
    in a command's 'options' attribute.
The code has been adapted from the setup() function available
in distutils/core.py.
"""
setup_attrs = dict(setup_attrs)
setup_attrs["script_name"] = os.path.basename(sys.argv[0])
dist = upstream_Distribution(setup_attrs)
# Update class attribute to also ensure the argument is processed
# when ``setuptools.setup`` is called.
upstream_Distribution.global_options.extend(
[
("hide-listing", None, "do not display list of files being " "included in the distribution"),
("force-cmake", None, "always run CMake"),
("skip-cmake", None, "do not run CMake"),
]
)
# Find and parse the config file(s): they will override options from
# the setup script, but be overridden by the command line.
dist.parse_config_files()
# Parse the command line and override config files; any
# command-line errors are the end user's fault, so turn them into
# SystemExit to suppress tracebacks.
with _capture_output():
result = dist.parse_command_line()
display_only = not result
if not hasattr(dist, "hide_listing"):
dist.hide_listing = False
if not hasattr(dist, "force_cmake"):
dist.force_cmake = False
if not hasattr(dist, "skip_cmake"):
dist.skip_cmake = False
plat_names = set()
for cmd in [dist.get_command_obj(command) for command in dist.commands]:
if getattr(cmd, "plat_name", None) is not None:
plat_names.add(cmd.plat_name)
if not plat_names:
plat_names.add(None)
elif len(plat_names) > 1:
raise SKBuildError("--plat-name is ambiguous: %s" % ", ".join(plat_names))
plat_name = list(plat_names)[0]
build_ext_inplace = dist.get_command_obj("build_ext").inplace
return (
display_only,
dist.help_commands,
dist.commands,
dist.hide_listing,
dist.force_cmake,
dist.skip_cmake,
plat_name,
build_ext_inplace,
)
def _check_skbuild_parameters(skbuild_kw):
cmake_install_dir = skbuild_kw["cmake_install_dir"]
if os.path.isabs(cmake_install_dir):
raise SKBuildError(
(
"\n setup parameter 'cmake_install_dir' is set to "
"an absolute path. A relative path is expected.\n"
" Project Root : {}\n"
" CMake Install Directory: {}\n"
).format(os.getcwd(), cmake_install_dir)
)
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if not os.path.exists(os.path.abspath(cmake_source_dir)):
raise SKBuildError(
(
"\n setup parameter 'cmake_source_dir' set to "
"a nonexistent directory.\n"
" Project Root : {}\n"
" CMake Source Directory: {}\n"
).format(os.getcwd(), cmake_source_dir)
)
def strip_package(package_parts, module_file):
"""Given ``package_parts`` (e.g. ``['foo', 'bar']``) and a
``module_file`` (e.g. ``foo/bar/jaz/rock/roll.py``), starting
from the left, this function will strip the parts of the path
matching the package parts and return a new string
(e.g ``jaz/rock/roll.py``).
The function will work as expected for either Windows or Unix-style
``module_file`` and this independently of the platform.
"""
if not package_parts or os.path.isabs(module_file):
return module_file
package = "/".join(package_parts)
module_dir = os.path.dirname(module_file.replace("\\", "/"))
module_dir = module_dir[: len(package)]
return module_file[len(package) + 1 :] if package != "" and module_dir.startswith(package) else module_file
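# Worked examples (illustrative):
#   strip_package(['foo', 'bar'], 'foo/bar/jaz/rock/roll.py')  -> 'jaz/rock/roll.py'
#   strip_package(['foo', 'bar'], 'foo\\bar\\module.py')       -> 'module.py'
#   strip_package(['foo'], 'other/module.py')                  -> 'other/module.py'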
def _package_data_contain_module(module, package_data):
"""Return True if the ``module`` is contained
in the ``package_data``.
``module`` is a tuple of the form
``(package, modulename, module_file)``.
"""
(package, _, module_file) = module
if package not in package_data:
return False
# We need to strip the package because a module entry
# usually looks like this:
#
# ('foo.bar', 'module', 'foo/bar/module.py')
#
# and the entry in package_data would look like this:
#
# {'foo.bar' : ['module.py']}
if strip_package(package.split("."), module_file) in package_data[package]:
return True
return False
def _should_run_cmake(commands, cmake_with_sdist):
"""Return True if at least one command requiring ``cmake`` to run
is found in ``commands``."""
for expected_command in [
"build",
"build_ext",
"develop",
"install",
"install_lib",
"bdist",
"bdist_dumb",
"bdist_egg",
"bdist_rpm",
"bdist_wininst",
"bdist_wheel",
"test",
]:
if expected_command in commands:
return True
if "sdist" in commands and cmake_with_sdist:
return True
return False
def _save_cmake_spec(args):
"""Save the CMake spec to disk"""
# We use JSON here because readability is more important than performance
try:
os.makedirs(os.path.dirname(CMAKE_SPEC_FILE()))
except OSError:
pass
with open(CMAKE_SPEC_FILE(), "w+") as fp:
json.dump(args, fp)
def _load_cmake_spec():
"""Load and return the CMake spec from disk"""
try:
with open(CMAKE_SPEC_FILE()) as fp:
return json.load(fp)
except (OSError, IOError, ValueError):
return None
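# Round-trip sketch (illustrative): _save_cmake_spec(spec) persists the
# dict as JSON under CMAKE_SPEC_FILE(); a later _load_cmake_spec() returns
# an equal dict, or None when the file is missing or unreadable, which
# forces a fresh CMake configure in setup() below.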
# pylint:disable=too-many-locals, too-many-branches
def setup(*args, **kw): # noqa: C901
"""This function wraps setup() so that we can run cmake, make,
CMake build, then proceed as usual with setuptools, appending the
CMake-generated output as necessary.
The CMake project is re-configured only if needed. This is achieved by (1) retrieving the environment mapping
associated with the generator set in the ``CMakeCache.txt`` file, (2) saving the CMake configure arguments and
version in :func:`skbuild.constants.CMAKE_SPEC_FILE()`: and (3) re-configuring only if either the generator or
the CMake specs change.
"""
# If any, strip ending slash from each package directory
# Regular setuptools does not support this
# TODO: will become an error in the future
if "package_dir" in kw:
for package, prefix in kw["package_dir"].items():
if prefix.endswith("/"):
msg = "package_dir={{{!r}: {!r}}} ends with a trailing slash, which is not supported by setuptools.".format(
package, prefix
)
warnings.warn(msg, FutureWarning, stacklevel=2)
kw["package_dir"][package] = prefix[:-1]
sys.argv, cmake_executable, skip_generator_test, cmake_args, make_args = parse_args()
# work around https://bugs.python.org/issue1011113
# (patches provided, but no updates since 2014)
cmdclass = kw.get("cmdclass", {})
cmdclass["build"] = cmdclass.get("build", build.build)
cmdclass["build_py"] = cmdclass.get("build_py", build_py.build_py)
cmdclass["build_ext"] = cmdclass.get("build_ext", build_ext.build_ext)
cmdclass["install"] = cmdclass.get("install", install.install)
cmdclass["install_lib"] = cmdclass.get("install_lib", install_lib.install_lib)
cmdclass["install_scripts"] = cmdclass.get("install_scripts", install_scripts.install_scripts)
cmdclass["clean"] = cmdclass.get("clean", clean.clean)
cmdclass["sdist"] = cmdclass.get("sdist", sdist.sdist)
cmdclass["bdist"] = cmdclass.get("bdist", bdist.bdist)
cmdclass["bdist_wheel"] = cmdclass.get("bdist_wheel", bdist_wheel.bdist_wheel)
cmdclass["egg_info"] = cmdclass.get("egg_info", egg_info.egg_info)
cmdclass["generate_source_manifest"] = cmdclass.get(
"generate_source_manifest", generate_source_manifest.generate_source_manifest
)
cmdclass["test"] = cmdclass.get("test", test.test)
kw["cmdclass"] = cmdclass
# Extract setup keywords specific to scikit-build and remove them from kw.
    # Removing the keywords from kw needs to be done here; otherwise, the
    # following call to _parse_setuptools_arguments would complain about
    # unknown setup options.
parameters = {
"cmake_args": [],
"cmake_install_dir": "",
"cmake_source_dir": "",
"cmake_with_sdist": False,
"cmake_languages": ("C", "CXX"),
"cmake_minimum_required_version": None,
"cmake_process_manifest_hook": None,
"cmake_install_target": "install",
}
skbuild_kw = {param: kw.pop(param, value) for param, value in parameters.items()}
# ... and validate them
try:
_check_skbuild_parameters(skbuild_kw)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# Convert source dir to a path relative to the root
# of the project
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if cmake_source_dir == ".":
cmake_source_dir = ""
if os.path.isabs(cmake_source_dir):
cmake_source_dir = os.path.relpath(cmake_source_dir)
# Skip running CMake in the following cases:
# * flag "--skip-cmake" is provided
# * "display only" argument is provided (e.g '--help', '--author', ...)
# * no command-line arguments or invalid ones are provided
# * no command requiring cmake is provided
    # * no CMakeLists.txt is found
display_only = has_invalid_arguments = help_commands = False
force_cmake = skip_cmake = False
commands = []
try:
(
display_only,
help_commands,
commands,
hide_listing,
force_cmake,
skip_cmake,
plat_name,
build_ext_inplace,
) = _parse_setuptools_arguments(kw)
except (DistutilsArgError, DistutilsGetoptError):
has_invalid_arguments = True
has_cmakelists = os.path.exists(os.path.join(cmake_source_dir, "CMakeLists.txt"))
if not has_cmakelists:
print("skipping skbuild (no CMakeLists.txt found)")
skip_skbuild = (
display_only
or has_invalid_arguments
or not _should_run_cmake(commands, skbuild_kw["cmake_with_sdist"])
or not has_cmakelists
)
if skip_skbuild and not force_cmake:
if help_commands:
# Prepend scikit-build help. Generate option descriptions using
# argparse.
skbuild_parser = create_skbuild_argparser()
arg_descriptions = [line for line in skbuild_parser.format_help().split("\n") if line.startswith(" ")]
print("scikit-build options:")
print("\n".join(arg_descriptions))
print("")
print('Arguments following a "--" are passed directly to CMake ' "(e.g. -DMY_VAR:BOOL=TRUE).")
            print('Arguments following a second "--" are passed directly to ' "the build tool.")
print("")
return setuptools.setup(*args, **kw)
developer_mode = "develop" in commands or "test" in commands or build_ext_inplace
packages = kw.get("packages", [])
package_dir = kw.get("package_dir", {})
package_data = copy.deepcopy(kw.get("package_data", {}))
py_modules = kw.get("py_modules", [])
new_py_modules = {py_module: False for py_module in py_modules}
scripts = kw.get("scripts", [])
new_scripts = {script: False for script in scripts}
data_files = {(parent_dir or "."): set(file_list) for parent_dir, file_list in kw.get("data_files", [])}
    # Since CMake arguments provided through the command line have more
    # weight, and since CMake only considers the last occurrence when an
    # argument is given multiple times, let's prepend the ones provided in
    # the setup call.
cmake_args = skbuild_kw["cmake_args"] + cmake_args
# Handle cmake_install_target
# get the target (next item after '--install-target') or return '' if no --install-target
cmake_install_target_from_command = next(
(make_args[index + 1] for index, item in enumerate(make_args) if item == "--install-target"), ""
)
cmake_install_target_from_setup = skbuild_kw["cmake_install_target"]
# Setting target from command takes precedence
# cmake_install_target_from_setup has the default 'install',
# so cmake_install_target would never be empty.
if cmake_install_target_from_command:
cmake_install_target = cmake_install_target_from_command
else:
cmake_install_target = cmake_install_target_from_setup
# Parse CMAKE_ARGS
env_cmake_args = os.environ["CMAKE_ARGS"].split() if "CMAKE_ARGS" in os.environ else []
env_cmake_args = [s for s in env_cmake_args if "CMAKE_INSTALL_PREFIX" not in s]
# Using the environment variable CMAKE_ARGS has lower precedence than manual options
cmake_args = env_cmake_args + cmake_args
if sys.platform == "darwin":
# If no ``--plat-name`` argument was passed, set default value.
if plat_name is None:
plat_name = skbuild_plat_name()
(_, version, machine) = plat_name.split("-")
# The loop here allows for CMAKE_OSX_* command line arguments to overload
# values passed with either the ``--plat-name`` command-line argument
# or the ``cmake_args`` setup option.
for cmake_arg in cmake_args:
if "CMAKE_OSX_DEPLOYMENT_TARGET" in cmake_arg:
version = cmake_arg.split("=")[1]
if "CMAKE_OSX_ARCHITECTURES" in cmake_arg:
machine = cmake_arg.split("=")[1]
if set(machine.split(";")) == {"x86_64", "arm64"}:
machine = "universal2"
set_skbuild_plat_name("macosx-{}-{}".format(version, machine))
        # Set platform env. variable so that commands (e.g. bdist_wheel)
        # use this information. The _PYTHON_HOST_PLATFORM env. variable is
        # used in the distutils.util.get_platform() function.
os.environ.setdefault("_PYTHON_HOST_PLATFORM", skbuild_plat_name())
# Set CMAKE_OSX_DEPLOYMENT_TARGET and CMAKE_OSX_ARCHITECTURES if not already
# specified
(_, version, machine) = skbuild_plat_name().split("-")
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_DEPLOYMENT_TARGET"):
cmake_args.append("-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=%s" % version)
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_ARCHITECTURES"):
machine_archs = "x86_64;arm64" if machine == "universal2" else machine
cmake_args.append("-DCMAKE_OSX_ARCHITECTURES:STRING=%s" % machine_archs)
# Install cmake if listed in `setup_requires`
for package in kw.get("setup_requires", []):
if Requirement(package).name == "cmake":
setup_requires = [package]
dist = upstream_Distribution({"setup_requires": setup_requires})
dist.fetch_build_eggs(setup_requires)
            # Considering that packages associated with the "setup_requires" keyword
            # are installed in the .eggs subdirectory without honoring the setuptools
            # "console_scripts" entry points and without setting the expected
            # executable permissions, we take care of that below.
import cmake # pylint: disable=import-outside-toplevel
for executable in ["cmake", "cpack", "ctest"]:
executable = os.path.join(cmake.CMAKE_BIN_DIR, executable)
if platform.system().lower() == "windows":
executable += ".exe"
st = os.stat(executable)
permissions = st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(executable, permissions)
cmake_executable = os.path.join(cmake.CMAKE_BIN_DIR, "cmake")
break
# Languages are used to determine a working generator
cmake_languages = skbuild_kw["cmake_languages"]
try:
if cmake_executable is None:
cmake_executable = CMAKE_DEFAULT_EXECUTABLE
cmkr = cmaker.CMaker(cmake_executable)
if not skip_cmake:
cmake_minimum_required_version = skbuild_kw["cmake_minimum_required_version"]
if cmake_minimum_required_version is not None:
if parse_version(cmkr.cmake_version) < parse_version(cmake_minimum_required_version):
raise SKBuildError(
"CMake version {} or higher is required. CMake version {} is being used".format(
cmake_minimum_required_version, cmkr.cmake_version
)
)
# Used to confirm that the cmake executable is the same, and that the environment
# didn't change
cmake_spec = {
"args": [which(CMAKE_DEFAULT_EXECUTABLE)] + cmake_args,
"version": cmkr.cmake_version,
"environment": {
"PYTHONNOUSERSITE": os.environ.get("PYTHONNOUSERSITE"),
"PYTHONPATH": os.environ.get("PYTHONPATH"),
},
}
# skip the configure step for a cached build
env = cmkr.get_cached_generator_env()
if env is None or cmake_spec != _load_cmake_spec():
env = cmkr.configure(
cmake_args,
skip_generator_test=skip_generator_test,
cmake_source_dir=cmake_source_dir,
cmake_install_dir=skbuild_kw["cmake_install_dir"],
languages=cmake_languages,
)
_save_cmake_spec(cmake_spec)
cmkr.make(make_args, install_target=cmake_install_target, env=env)
except SKBuildGeneratorNotFoundError as ex:
sys.exit(ex)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# If needed, set reasonable defaults for package_dir
for package in packages:
if package not in package_dir:
package_dir[package] = package.replace(".", "/")
if "" in package_dir:
package_dir[package] = to_unix_path(os.path.join(package_dir[""], package_dir[package]))
kw["package_dir"] = package_dir
package_prefixes = _collect_package_prefixes(package_dir, packages)
# This hook enables custom processing of the cmake manifest
cmake_manifest = cmkr.install()
process_manifest = skbuild_kw.get("cmake_process_manifest_hook")
if process_manifest is not None:
if callable(process_manifest):
cmake_manifest = process_manifest(cmake_manifest)
else:
raise SKBuildError("The cmake_process_manifest_hook argument should be callable.")
_classify_installed_files(
cmake_manifest,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
skbuild_kw["cmake_install_dir"],
)
original_manifestin_data_files = []
if kw.get("include_package_data", False):
original_manifestin_data_files = parse_manifestin(os.path.join(os.getcwd(), "MANIFEST.in"))
for path in original_manifestin_data_files:
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
if developer_mode:
# Copy packages
for package, package_file_list in package_data.items():
for package_file in package_file_list:
package_file = os.path.join(package_dir[package], package_file)
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
# Copy modules
for py_module in py_modules:
package_file = py_module + ".py"
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
else:
_consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing)
original_package_data = kw.get("package_data", {}).copy()
_consolidate_package_data_files(original_package_data, package_prefixes, hide_listing)
for data_file in original_manifestin_data_files:
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), data_file)
_copy_file(data_file, dest_data_file, hide_listing)
kw["package_data"] = package_data
kw["package_dir"] = {
package: (
os.path.join(CMAKE_INSTALL_DIR(), prefix)
if os.path.exists(os.path.join(CMAKE_INSTALL_DIR(), prefix))
else prefix
)
for prefix, package in package_prefixes
}
kw["scripts"] = [
os.path.join(CMAKE_INSTALL_DIR(), script) if mask else script for script, mask in new_scripts.items()
]
kw["data_files"] = [(parent_dir, list(file_set)) for parent_dir, file_set in data_files.items()]
if "zip_safe" not in kw:
kw["zip_safe"] = False
# Adapted from espdev/ITKPythonInstaller/setup.py.in
class BinaryDistribution(upstream_Distribution): # pylint: disable=missing-class-docstring
def has_ext_modules(self): # pylint: disable=no-self-use,missing-function-docstring
return has_cmakelists
kw["distclass"] = BinaryDistribution
print("")
return setuptools.setup(*args, **kw)
def _collect_package_prefixes(package_dir, packages):
"""
Collect the list of prefixes for all packages
The list is used to match paths in the install manifest to packages
specified in the setup.py script.
The list is sorted in decreasing order of prefix length so that paths are
matched with their immediate parent package, instead of any of that
package's ancestors.
For example, consider the project structure below. Assume that the
setup call was made with a package list featuring "top" and "top.bar", but
not "top.not_a_subpackage".
::
top/ -> top/
__init__.py -> top/__init__.py (parent: top)
foo.py -> top/foo.py (parent: top)
bar/ -> top/bar/ (parent: top)
__init__.py -> top/bar/__init__.py (parent: top.bar)
not_a_subpackage/ -> top/not_a_subpackage/ (parent: top)
data_0.txt -> top/not_a_subpackage/data_0.txt (parent: top)
data_1.txt -> top/not_a_subpackage/data_1.txt (parent: top)
The paths in the generated install manifest are matched to packages
according to the parents indicated on the right. Only packages that are
specified in the setup() call are considered. Because of the sort order,
    the data files at the bottom would have been mapped to
    "top.not_a_subpackage" instead of "top" had such a package been
    specified.
"""
return list(
sorted(
((package_dir[package].replace(".", "/"), package) for package in packages),
key=lambda tup: len(tup[0]),
reverse=True,
)
)
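# Example (hypothetical packages, for illustration only): with
# packages=['top', 'top.bar'] and package_dir={'top': 'top',
# 'top.bar': 'top/bar'}, the result is
# [('top/bar', 'top.bar'), ('top', 'top')], longest prefix first.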
def _classify_installed_files(
install_paths,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
_cmake_install_dir,
):
assert not os.path.isabs(cmake_source_dir)
assert cmake_source_dir != "."
install_root = os.path.join(os.getcwd(), CMAKE_INSTALL_DIR())
for path in install_paths:
# if this installed file is not within the project root, complain and
# exit
if not to_platform_path(path).startswith(CMAKE_INSTALL_DIR()):
raise SKBuildError(
(
"\n CMake-installed files must be within the project root.\n"
" Project Root : {}\n"
" Violating File: {}\n"
).format(install_root, to_platform_path(path))
)
# peel off the 'skbuild' prefix
path = to_unix_path(os.path.relpath(path, CMAKE_INSTALL_DIR()))
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
def _classify_file(path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files):
found_package = False
found_module = False
found_script = False
path = to_unix_path(path)
# check to see if path is part of a package
for prefix, package in package_prefixes:
if path.startswith(prefix + "/"):
# peel off the package prefix
path = to_unix_path(os.path.relpath(path, prefix))
package_file_list = package_data.get(package, [])
package_file_list.append(path)
package_data[package] = package_file_list
found_package = True
break
if found_package:
return
# If control reaches this point, then this installed file is not part of
# a package.
# check if path is a module
for module in py_modules:
if path.replace("/", ".") == ".".join((module, "py")):
new_py_modules[module] = True
found_module = True
break
if found_module:
return
# If control reaches this point, then this installed file is not a
# module
# if the file is a script, mark the corresponding script
for script in scripts:
if path == script:
new_scripts[script] = True
found_script = True
break
if found_script:
return
# If control reaches this point, then this installed file is not a
# script
# If control reaches this point, then we have installed files that are
# not part of a package, not a module, nor a script. Without any other
# information, we can only treat it as a generic data file.
parent_dir = os.path.dirname(path)
file_set = data_files.get(parent_dir)
if file_set is None:
file_set = set()
data_files[parent_dir] = file_set
file_set.add(os.path.join(CMAKE_INSTALL_DIR(), path))
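# Worked example (hypothetical manifest entries, for illustration only):
# with package_prefixes=[('top', 'top')], py_modules=['helper'] and
# scripts=['bin/tool']:
#   'top/data.txt' -> appended to package_data['top'] as 'data.txt'
#   'helper.py'    -> new_py_modules['helper'] = True
#   'bin/tool'     -> new_scripts['bin/tool'] = True
#   'docs/notes'   -> data_files['docs'] gains
#                     os.path.join(CMAKE_INSTALL_DIR(), 'docs/notes')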
def _copy_file(src_file, dest_file, hide_listing=True):
"""Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
    By default (``hide_listing=True``), messages like
    `creating directory /path/to/package` and
    `copying directory /src/path/to/package -> path/to/package` are not
    displayed. Setting ``hide_listing`` to False prints them to standard output.
"""
# Create directory if needed
dest_dir = os.path.dirname(dest_file)
if dest_dir != "" and not os.path.exists(dest_dir):
if not hide_listing:
print("creating directory {}".format(dest_dir))
mkdir_p(dest_dir)
# Copy file
if not hide_listing:
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
def _consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing):
"""This function consolidates packages having modules located in
both the source tree and the CMake install tree into one location.
The one location is the CMake install tree
(see :func:`.constants.CMAKE_INSTALL_DIR()`).
    Why? This is a necessary evil because ``Setuptools`` keeps track of the
    package and module files to install using a dictionary of lists, where
    the keys are package names (e.g. ``foo.bar``) and the values are lists of
    module files (e.g. ``['__init__.py', 'baz.py']``). Since this doesn't allow
    the files associated with a given package to be split across multiple
    locations, one location is selected and the files are copied over.
    How? It currently searches for modules across both locations using
    the :class:`.utils.PythonModuleFinder`. Then, with the help
    of :func:`_package_data_contain_module`, it identifies which
    ones are already included and which are missing from the distribution.
Once a module has been identified as ``missing``, it is both copied
into the :func:`.constants.CMAKE_INSTALL_DIR()` and added to the
``package_data`` dictionary so that it can be considered by
the upstream setup function.
"""
try:
# Search for python modules in both the current directory
# and cmake install tree.
modules = PythonModuleFinder(
packages, package_dir, py_modules, alternative_build_base=CMAKE_INSTALL_DIR()
).find_all_modules()
except DistutilsError as msg:
raise SystemExit("error: {}".format(str(msg)))
print("")
for entry in modules:
# Check if module file should be copied into the CMake install tree.
if _package_data_contain_module(entry, package_data):
continue
(package, _, src_module_file) = entry
# Copy missing module file
if os.path.exists(src_module_file):
dest_module_file = os.path.join(CMAKE_INSTALL_DIR(), src_module_file)
_copy_file(src_module_file, dest_module_file, hide_listing)
# Since the mapping in package_data expects the package to be associated
# with a list of files relative to the directory containing the package,
# the following section makes sure to strip the redundant part of the
# module file path.
# The redundant part should be stripped for both cmake_source_dir and
# the package.
package_parts = []
if cmake_source_dir:
package_parts = cmake_source_dir.split(os.path.sep)
package_parts += package.split(".")
stripped_module_file = strip_package(package_parts, src_module_file)
# Update list of files associated with the corresponding package
try:
package_data[package].append(stripped_module_file)
except KeyError:
package_data[package] = [stripped_module_file]
def _consolidate_package_data_files(original_package_data, package_prefixes, hide_listing):
"""This function copies package data files specified using the ``package_data`` keyword
into :func:`.constants.CMAKE_INSTALL_DIR()`.
::
setup(...,
packages=['mypkg'],
package_dir={'mypkg': 'src/mypkg'},
package_data={'mypkg': ['data/*.dat']},
)
Considering that (1) the packages associated with modules located in both the source tree and
the CMake install tree are consolidated into the CMake install tree, and (2) the consolidated
package path set in the ``package_dir`` dictionary and later used by setuptools to package
(or install) modules and data files is :func:`.constants.CMAKE_INSTALL_DIR()`, copying the data files
is required to ensure setuptools can find them when it uses the package directory.
"""
project_root = os.getcwd()
for prefix, package in package_prefixes:
if package not in original_package_data:
continue
raw_patterns = original_package_data[package]
for pattern in raw_patterns:
expanded_package_dir = os.path.join(project_root, prefix, pattern)
for src_data_file in glob(expanded_package_dir):
full_prefix_length = len(os.path.join(project_root, prefix)) + 1
data_file = src_data_file[full_prefix_length:]
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), prefix, data_file)
_copy_file(src_data_file, dest_data_file, hide_listing)
| 38.586377
| 124
| 0.656698
|
from __future__ import print_function
import argparse
import copy
import json
import os
import os.path
import platform
import stat
import sys
import warnings
from contextlib import contextmanager
from distutils.errors import DistutilsArgError, DistutilsError, DistutilsGetoptError
from glob import glob
from shutil import copyfile, copymode
import setuptools
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
if sys.version_info >= (3, 3):
from shutil import which
else:
from .compat import which
from packaging.requirements import Requirement
from packaging.version import parse as parse_version
from setuptools.dist import Distribution as upstream_Distribution
from . import cmaker
from .command import (
bdist,
bdist_wheel,
build,
build_ext,
build_py,
clean,
egg_info,
generate_source_manifest,
install,
install_lib,
install_scripts,
sdist,
test,
)
from .constants import (
CMAKE_DEFAULT_EXECUTABLE,
CMAKE_INSTALL_DIR,
CMAKE_SPEC_FILE,
set_skbuild_plat_name,
skbuild_plat_name,
)
from .exceptions import SKBuildError, SKBuildGeneratorNotFoundError
from .utils import (
PythonModuleFinder,
mkdir_p,
parse_manifestin,
to_platform_path,
to_unix_path,
)
def create_skbuild_argparser():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--build-type", default="Release", metavar="", help="specify the CMake build type (e.g. Debug or Release)"
)
parser.add_argument("-G", "--generator", metavar="", help="specify the CMake build system generator")
parser.add_argument("-j", metavar="N", type=int, dest="jobs", help="allow N build jobs at once")
parser.add_argument("--cmake-executable", default=None, metavar="", help="specify the path to the cmake executable")
parser.add_argument(
"--install-target",
default=None,
metavar="",
help="specify the CMake target performing the install. " "If not provided, uses the target ``install``",
)
parser.add_argument(
"--skip-generator-test",
action="store_true",
help="skip generator test when a generator is explicitly selected using --generator",
)
return parser
def _is_cmake_configure_argument(arg):
for cmake_arg in (
"-C",
"-D",
):
if arg.startswith(cmake_arg):
return True
return False
def parse_skbuild_args(args, cmake_args, build_tool_args):
parser = create_skbuild_argparser()
cmake_args.extend([arg for arg in args if _is_cmake_configure_argument(arg)])
args = [arg for arg in args if not _is_cmake_configure_argument(arg)]
namespace, remaining_args = parser.parse_known_args(args)
cmake_args.append("-DCMAKE_BUILD_TYPE:STRING=" + namespace.build_type)
if namespace.generator is not None:
cmake_args.extend(["-G", namespace.generator])
build_tool_args.extend(["--config", namespace.build_type])
if namespace.jobs is not None:
build_tool_args.extend(["-j", str(namespace.jobs)])
if namespace.install_target is not None:
build_tool_args.extend(["--install-target", namespace.install_target])
if namespace.generator is None and namespace.skip_generator_test is True:
sys.exit("ERROR: Specifying --skip-generator-test requires --generator to also be specified.")
return remaining_args, namespace.cmake_executable, namespace.skip_generator_test
def parse_args():
dutils = []
cmake = []
make = []
argsets = [dutils, cmake, make]
i = 0
separator = "--"
for arg in sys.argv:
if arg == separator:
i += 1
if i >= len(argsets):
sys.exit(
'ERROR: Too many "{}" separators provided '
"(expected at most {}).".format(separator, len(argsets) - 1)
)
else:
argsets[i].append(arg)
dutils, cmake_executable, skip_generator_test = parse_skbuild_args(dutils, cmake, make)
return dutils, cmake_executable, skip_generator_test, cmake, make
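# --- Editor's note: hypothetical illustration of the "--" separator handling ---
# --- in parse_args() above; it mirrors that loop on a fixed argument list.   ---
def _example_argument_separators():
    # `setup.py bdist_wheel -- -DFOO:BOOL=ON -- -j8` yields three argument
    # sets: setuptools args, CMake configure args, and build-tool args.
    argsets = [[], [], []]
    i = 0
    for arg in ["bdist_wheel", "--", "-DFOO:BOOL=ON", "--", "-j8"]:
        if arg == "--":
            i += 1
        else:
            argsets[i].append(arg)
    return argsets  # -> [["bdist_wheel"], ["-DFOO:BOOL=ON"], ["-j8"]]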
@contextmanager
def _capture_output():
oldout, olderr = sys.stdout, sys.stderr
try:
out = [StringIO(), StringIO()]
sys.stdout, sys.stderr = out
yield out
finally:
sys.stdout, sys.stderr = oldout, olderr
out[0] = out[0].getvalue()
out[1] = out[1].getvalue()
def _parse_setuptools_arguments(setup_attrs):
setup_attrs = dict(setup_attrs)
setup_attrs["script_name"] = os.path.basename(sys.argv[0])
dist = upstream_Distribution(setup_attrs)
upstream_Distribution.global_options.extend(
[
("hide-listing", None, "do not display list of files being " "included in the distribution"),
("force-cmake", None, "always run CMake"),
("skip-cmake", None, "do not run CMake"),
]
)
dist.parse_config_files()
# SystemExit to suppress tracebacks.
with _capture_output():
result = dist.parse_command_line()
display_only = not result
if not hasattr(dist, "hide_listing"):
dist.hide_listing = False
if not hasattr(dist, "force_cmake"):
dist.force_cmake = False
if not hasattr(dist, "skip_cmake"):
dist.skip_cmake = False
plat_names = set()
for cmd in [dist.get_command_obj(command) for command in dist.commands]:
if getattr(cmd, "plat_name", None) is not None:
plat_names.add(cmd.plat_name)
if not plat_names:
plat_names.add(None)
elif len(plat_names) > 1:
raise SKBuildError("--plat-name is ambiguous: %s" % ", ".join(plat_names))
plat_name = list(plat_names)[0]
build_ext_inplace = dist.get_command_obj("build_ext").inplace
return (
display_only,
dist.help_commands,
dist.commands,
dist.hide_listing,
dist.force_cmake,
dist.skip_cmake,
plat_name,
build_ext_inplace,
)
def _check_skbuild_parameters(skbuild_kw):
cmake_install_dir = skbuild_kw["cmake_install_dir"]
if os.path.isabs(cmake_install_dir):
raise SKBuildError(
(
"\n setup parameter 'cmake_install_dir' is set to "
"an absolute path. A relative path is expected.\n"
" Project Root : {}\n"
" CMake Install Directory: {}\n"
).format(os.getcwd(), cmake_install_dir)
)
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if not os.path.exists(os.path.abspath(cmake_source_dir)):
raise SKBuildError(
(
"\n setup parameter 'cmake_source_dir' set to "
"a nonexistent directory.\n"
" Project Root : {}\n"
" CMake Source Directory: {}\n"
).format(os.getcwd(), cmake_source_dir)
)
def strip_package(package_parts, module_file):
if not package_parts or os.path.isabs(module_file):
return module_file
package = "/".join(package_parts)
module_dir = os.path.dirname(module_file.replace("\\", "/"))
module_dir = module_dir[: len(package)]
return module_file[len(package) + 1 :] if package != "" and module_dir.startswith(package) else module_file
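# --- Editor's note: hypothetical examples for strip_package() above. ---
def _example_strip_package():
    # A module file inside the package loses the package prefix...
    assert strip_package(["foo", "bar"], "foo/bar/module.py") == "module.py"
    # ...while files outside the package (or absolute paths) pass through.
    assert strip_package(["foo", "bar"], "baz/module.py") == "baz/module.py"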
def _package_data_contain_module(module, package_data):
(package, _, module_file) = module
if package not in package_data:
return False
# We need to strip the package because a module entry
# usually looks like this:
#
# ('foo.bar', 'module', 'foo/bar/module.py')
#
# and the entry in package_data would look like this:
#
# {'foo.bar' : ['module.py']}
if strip_package(package.split("."), module_file) in package_data[package]:
return True
return False
def _should_run_cmake(commands, cmake_with_sdist):
for expected_command in [
"build",
"build_ext",
"develop",
"install",
"install_lib",
"bdist",
"bdist_dumb",
"bdist_egg",
"bdist_rpm",
"bdist_wininst",
"bdist_wheel",
"test",
]:
if expected_command in commands:
return True
if "sdist" in commands and cmake_with_sdist:
return True
return False
def _save_cmake_spec(args):
# We use JSON here because readability is more important than performance
try:
os.makedirs(os.path.dirname(CMAKE_SPEC_FILE()))
except OSError:
pass
with open(CMAKE_SPEC_FILE(), "w+") as fp:
json.dump(args, fp)
def _load_cmake_spec():
try:
with open(CMAKE_SPEC_FILE()) as fp:
return json.load(fp)
except (OSError, IOError, ValueError):
return None
# pylint:disable=too-many-locals, too-many-branches
def setup(*args, **kw): # noqa: C901
# If any, strip ending slash from each package directory
# Regular setuptools does not support this
# TODO: will become an error in the future
if "package_dir" in kw:
for package, prefix in kw["package_dir"].items():
if prefix.endswith("/"):
msg = "package_dir={{{!r}: {!r}}} ends with a trailing slash, which is not supported by setuptools.".format(
package, prefix
)
warnings.warn(msg, FutureWarning, stacklevel=2)
kw["package_dir"][package] = prefix[:-1]
sys.argv, cmake_executable, skip_generator_test, cmake_args, make_args = parse_args()
# work around https://bugs.python.org/issue1011113
# (patches provided, but no updates since 2014)
cmdclass = kw.get("cmdclass", {})
cmdclass["build"] = cmdclass.get("build", build.build)
cmdclass["build_py"] = cmdclass.get("build_py", build_py.build_py)
cmdclass["build_ext"] = cmdclass.get("build_ext", build_ext.build_ext)
cmdclass["install"] = cmdclass.get("install", install.install)
cmdclass["install_lib"] = cmdclass.get("install_lib", install_lib.install_lib)
cmdclass["install_scripts"] = cmdclass.get("install_scripts", install_scripts.install_scripts)
cmdclass["clean"] = cmdclass.get("clean", clean.clean)
cmdclass["sdist"] = cmdclass.get("sdist", sdist.sdist)
cmdclass["bdist"] = cmdclass.get("bdist", bdist.bdist)
cmdclass["bdist_wheel"] = cmdclass.get("bdist_wheel", bdist_wheel.bdist_wheel)
cmdclass["egg_info"] = cmdclass.get("egg_info", egg_info.egg_info)
cmdclass["generate_source_manifest"] = cmdclass.get(
"generate_source_manifest", generate_source_manifest.generate_source_manifest
)
cmdclass["test"] = cmdclass.get("test", test.test)
kw["cmdclass"] = cmdclass
# Extract setup keywords specific to scikit-build and remove them from kw.
# Removing the keyword from kw need to be done here otherwise, the
# following call to _parse_setuptools_arguments would complain about
# unknown setup options.
parameters = {
"cmake_args": [],
"cmake_install_dir": "",
"cmake_source_dir": "",
"cmake_with_sdist": False,
"cmake_languages": ("C", "CXX"),
"cmake_minimum_required_version": None,
"cmake_process_manifest_hook": None,
"cmake_install_target": "install",
}
skbuild_kw = {param: kw.pop(param, value) for param, value in parameters.items()}
# ... and validate them
try:
_check_skbuild_parameters(skbuild_kw)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# Convert source dir to a path relative to the root
# of the project
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if cmake_source_dir == ".":
cmake_source_dir = ""
if os.path.isabs(cmake_source_dir):
cmake_source_dir = os.path.relpath(cmake_source_dir)
# Skip running CMake in the following cases:
# * flag "--skip-cmake" is provided
# * "display only" argument is provided (e.g '--help', '--author', ...)
# * no command-line arguments or invalid ones are provided
# * no command requiring cmake is provided
# * no CMakeLists.txt if found
display_only = has_invalid_arguments = help_commands = False
force_cmake = skip_cmake = False
commands = []
try:
(
display_only,
help_commands,
commands,
hide_listing,
force_cmake,
skip_cmake,
plat_name,
build_ext_inplace,
) = _parse_setuptools_arguments(kw)
except (DistutilsArgError, DistutilsGetoptError):
has_invalid_arguments = True
has_cmakelists = os.path.exists(os.path.join(cmake_source_dir, "CMakeLists.txt"))
if not has_cmakelists:
print("skipping skbuild (no CMakeLists.txt found)")
skip_skbuild = (
display_only
or has_invalid_arguments
or not _should_run_cmake(commands, skbuild_kw["cmake_with_sdist"])
or not has_cmakelists
)
if skip_skbuild and not force_cmake:
if help_commands:
# Prepend scikit-build help. Generate option descriptions using
# argparse.
skbuild_parser = create_skbuild_argparser()
arg_descriptions = [line for line in skbuild_parser.format_help().split("\n") if line.startswith(" ")]
print("scikit-build options:")
print("\n".join(arg_descriptions))
print("")
print('Arguments following a "--" are passed directly to CMake ' "(e.g. -DMY_VAR:BOOL=TRUE).")
            print('Arguments following a second "--" are passed directly to ' "the build tool.")
print("")
return setuptools.setup(*args, **kw)
developer_mode = "develop" in commands or "test" in commands or build_ext_inplace
packages = kw.get("packages", [])
package_dir = kw.get("package_dir", {})
package_data = copy.deepcopy(kw.get("package_data", {}))
py_modules = kw.get("py_modules", [])
new_py_modules = {py_module: False for py_module in py_modules}
scripts = kw.get("scripts", [])
new_scripts = {script: False for script in scripts}
data_files = {(parent_dir or "."): set(file_list) for parent_dir, file_list in kw.get("data_files", [])}
# Since CMake arguments provided through the command line have more
# weight and when CMake is given multiple times a argument, only the last
# one is considered, let's prepend the one provided in the setup call.
cmake_args = skbuild_kw["cmake_args"] + cmake_args
cmake_install_target_from_command = next(
(make_args[index + 1] for index, item in enumerate(make_args) if item == "--install-target"), ""
)
cmake_install_target_from_setup = skbuild_kw["cmake_install_target"]
if cmake_install_target_from_command:
cmake_install_target = cmake_install_target_from_command
else:
cmake_install_target = cmake_install_target_from_setup
env_cmake_args = os.environ["CMAKE_ARGS"].split() if "CMAKE_ARGS" in os.environ else []
env_cmake_args = [s for s in env_cmake_args if "CMAKE_INSTALL_PREFIX" not in s]
cmake_args = env_cmake_args + cmake_args
if sys.platform == "darwin":
if plat_name is None:
plat_name = skbuild_plat_name()
(_, version, machine) = plat_name.split("-")
for cmake_arg in cmake_args:
if "CMAKE_OSX_DEPLOYMENT_TARGET" in cmake_arg:
version = cmake_arg.split("=")[1]
if "CMAKE_OSX_ARCHITECTURES" in cmake_arg:
machine = cmake_arg.split("=")[1]
if set(machine.split(";")) == {"x86_64", "arm64"}:
machine = "universal2"
set_skbuild_plat_name("macosx-{}-{}".format(version, machine))
os.environ.setdefault("_PYTHON_HOST_PLATFORM", skbuild_plat_name())
(_, version, machine) = skbuild_plat_name().split("-")
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_DEPLOYMENT_TARGET"):
cmake_args.append("-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=%s" % version)
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_ARCHITECTURES"):
machine_archs = "x86_64;arm64" if machine == "universal2" else machine
cmake_args.append("-DCMAKE_OSX_ARCHITECTURES:STRING=%s" % machine_archs)
for package in kw.get("setup_requires", []):
if Requirement(package).name == "cmake":
setup_requires = [package]
dist = upstream_Distribution({"setup_requires": setup_requires})
dist.fetch_build_eggs(setup_requires)
import cmake
for executable in ["cmake", "cpack", "ctest"]:
executable = os.path.join(cmake.CMAKE_BIN_DIR, executable)
if platform.system().lower() == "windows":
executable += ".exe"
st = os.stat(executable)
permissions = st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(executable, permissions)
cmake_executable = os.path.join(cmake.CMAKE_BIN_DIR, "cmake")
break
cmake_languages = skbuild_kw["cmake_languages"]
try:
if cmake_executable is None:
cmake_executable = CMAKE_DEFAULT_EXECUTABLE
cmkr = cmaker.CMaker(cmake_executable)
if not skip_cmake:
cmake_minimum_required_version = skbuild_kw["cmake_minimum_required_version"]
if cmake_minimum_required_version is not None:
if parse_version(cmkr.cmake_version) < parse_version(cmake_minimum_required_version):
raise SKBuildError(
"CMake version {} or higher is required. CMake version {} is being used".format(
cmake_minimum_required_version, cmkr.cmake_version
)
)
cmake_spec = {
"args": [which(CMAKE_DEFAULT_EXECUTABLE)] + cmake_args,
"version": cmkr.cmake_version,
"environment": {
"PYTHONNOUSERSITE": os.environ.get("PYTHONNOUSERSITE"),
"PYTHONPATH": os.environ.get("PYTHONPATH"),
},
}
# skip the configure step for a cached build
env = cmkr.get_cached_generator_env()
if env is None or cmake_spec != _load_cmake_spec():
env = cmkr.configure(
cmake_args,
skip_generator_test=skip_generator_test,
cmake_source_dir=cmake_source_dir,
cmake_install_dir=skbuild_kw["cmake_install_dir"],
languages=cmake_languages,
)
_save_cmake_spec(cmake_spec)
cmkr.make(make_args, install_target=cmake_install_target, env=env)
except SKBuildGeneratorNotFoundError as ex:
sys.exit(ex)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# If needed, set reasonable defaults for package_dir
for package in packages:
if package not in package_dir:
package_dir[package] = package.replace(".", "/")
if "" in package_dir:
package_dir[package] = to_unix_path(os.path.join(package_dir[""], package_dir[package]))
kw["package_dir"] = package_dir
package_prefixes = _collect_package_prefixes(package_dir, packages)
# This hook enables custom processing of the cmake manifest
cmake_manifest = cmkr.install()
process_manifest = skbuild_kw.get("cmake_process_manifest_hook")
if process_manifest is not None:
if callable(process_manifest):
cmake_manifest = process_manifest(cmake_manifest)
else:
raise SKBuildError("The cmake_process_manifest_hook argument should be callable.")
_classify_installed_files(
cmake_manifest,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
skbuild_kw["cmake_install_dir"],
)
original_manifestin_data_files = []
if kw.get("include_package_data", False):
original_manifestin_data_files = parse_manifestin(os.path.join(os.getcwd(), "MANIFEST.in"))
for path in original_manifestin_data_files:
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
if developer_mode:
# Copy packages
for package, package_file_list in package_data.items():
for package_file in package_file_list:
package_file = os.path.join(package_dir[package], package_file)
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
# Copy modules
for py_module in py_modules:
package_file = py_module + ".py"
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
else:
_consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing)
original_package_data = kw.get("package_data", {}).copy()
_consolidate_package_data_files(original_package_data, package_prefixes, hide_listing)
for data_file in original_manifestin_data_files:
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), data_file)
_copy_file(data_file, dest_data_file, hide_listing)
kw["package_data"] = package_data
kw["package_dir"] = {
package: (
os.path.join(CMAKE_INSTALL_DIR(), prefix)
if os.path.exists(os.path.join(CMAKE_INSTALL_DIR(), prefix))
else prefix
)
for prefix, package in package_prefixes
}
kw["scripts"] = [
os.path.join(CMAKE_INSTALL_DIR(), script) if mask else script for script, mask in new_scripts.items()
]
kw["data_files"] = [(parent_dir, list(file_set)) for parent_dir, file_set in data_files.items()]
if "zip_safe" not in kw:
kw["zip_safe"] = False
# Adapted from espdev/ITKPythonInstaller/setup.py.in
class BinaryDistribution(upstream_Distribution): # pylint: disable=missing-class-docstring
def has_ext_modules(self): # pylint: disable=no-self-use,missing-function-docstring
return has_cmakelists
kw["distclass"] = BinaryDistribution
print("")
return setuptools.setup(*args, **kw)
def _collect_package_prefixes(package_dir, packages):
return list(
sorted(
((package_dir[package].replace(".", "/"), package) for package in packages),
key=lambda tup: len(tup[0]),
reverse=True,
)
)
def _classify_installed_files(
install_paths,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
_cmake_install_dir,
):
assert not os.path.isabs(cmake_source_dir)
assert cmake_source_dir != "."
install_root = os.path.join(os.getcwd(), CMAKE_INSTALL_DIR())
for path in install_paths:
# if this installed file is not within the project root, complain and
# exit
if not to_platform_path(path).startswith(CMAKE_INSTALL_DIR()):
raise SKBuildError(
(
"\n CMake-installed files must be within the project root.\n"
" Project Root : {}\n"
" Violating File: {}\n"
).format(install_root, to_platform_path(path))
)
# peel off the 'skbuild' prefix
path = to_unix_path(os.path.relpath(path, CMAKE_INSTALL_DIR()))
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
def _classify_file(path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files):
found_package = False
found_module = False
found_script = False
path = to_unix_path(path)
# check to see if path is part of a package
for prefix, package in package_prefixes:
if path.startswith(prefix + "/"):
# peel off the package prefix
path = to_unix_path(os.path.relpath(path, prefix))
package_file_list = package_data.get(package, [])
package_file_list.append(path)
package_data[package] = package_file_list
found_package = True
break
if found_package:
return
# If control reaches this point, then this installed file is not part of
# a package.
# check if path is a module
for module in py_modules:
if path.replace("/", ".") == ".".join((module, "py")):
new_py_modules[module] = True
found_module = True
break
if found_module:
return
# If control reaches this point, then this installed file is not a
# module
# if the file is a script, mark the corresponding script
for script in scripts:
if path == script:
new_scripts[script] = True
found_script = True
break
if found_script:
return
# If control reaches this point, then this installed file is not a
# script
# If control reaches this point, then we have installed files that are
# not part of a package, not a module, nor a script. Without any other
# information, we can only treat it as a generic data file.
parent_dir = os.path.dirname(path)
file_set = data_files.get(parent_dir)
if file_set is None:
file_set = set()
data_files[parent_dir] = file_set
file_set.add(os.path.join(CMAKE_INSTALL_DIR(), path))
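# --- Editor's note: hypothetical sketch of the classification fallthrough ---
# --- implemented by _classify_file() above.                                ---
def _example_classify_file():
    # A file under a known package prefix lands in package_data; anything that
    # matches no package, module or script falls through to data_files.
    package_data, data_files = {}, {}
    _classify_file("mypkg/resource.bin", package_data, [("mypkg", "mypkg")],
                   [], {}, [], {}, data_files)
    return package_data  # -> {"mypkg": ["resource.bin"]}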
def _copy_file(src_file, dest_file, hide_listing=True):
# Create directory if needed
dest_dir = os.path.dirname(dest_file)
if dest_dir != "" and not os.path.exists(dest_dir):
if not hide_listing:
print("creating directory {}".format(dest_dir))
mkdir_p(dest_dir)
# Copy file
if not hide_listing:
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
def _consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing):
try:
# Search for python modules in both the current directory
# and cmake install tree.
modules = PythonModuleFinder(
packages, package_dir, py_modules, alternative_build_base=CMAKE_INSTALL_DIR()
).find_all_modules()
except DistutilsError as msg:
raise SystemExit("error: {}".format(str(msg)))
print("")
for entry in modules:
# Check if module file should be copied into the CMake install tree.
if _package_data_contain_module(entry, package_data):
continue
(package, _, src_module_file) = entry
# Copy missing module file
if os.path.exists(src_module_file):
dest_module_file = os.path.join(CMAKE_INSTALL_DIR(), src_module_file)
_copy_file(src_module_file, dest_module_file, hide_listing)
# Since the mapping in package_data expects the package to be associated
# with a list of files relative to the directory containing the package,
# the following section makes sure to strip the redundant part of the
# module file path.
# The redundant part should be stripped for both cmake_source_dir and
# the package.
package_parts = []
if cmake_source_dir:
package_parts = cmake_source_dir.split(os.path.sep)
package_parts += package.split(".")
stripped_module_file = strip_package(package_parts, src_module_file)
# Update list of files associated with the corresponding package
try:
package_data[package].append(stripped_module_file)
except KeyError:
package_data[package] = [stripped_module_file]
def _consolidate_package_data_files(original_package_data, package_prefixes, hide_listing):
project_root = os.getcwd()
for prefix, package in package_prefixes:
if package not in original_package_data:
continue
raw_patterns = original_package_data[package]
for pattern in raw_patterns:
expanded_package_dir = os.path.join(project_root, prefix, pattern)
for src_data_file in glob(expanded_package_dir):
full_prefix_length = len(os.path.join(project_root, prefix)) + 1
data_file = src_data_file[full_prefix_length:]
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), prefix, data_file)
_copy_file(src_data_file, dest_data_file, hide_listing)
| true
| true
|
790c283a5a74d9bcd5fc3079c028df79210d6a0c
| 187
|
py
|
Python
|
Course/functions/example_12.py
|
zevgenia/Python_shultais
|
e51c31de221c5e7f36ede857a960138009ec8a05
|
[
"Apache-2.0"
] | null | null | null |
Course/functions/example_12.py
|
zevgenia/Python_shultais
|
e51c31de221c5e7f36ede857a960138009ec8a05
|
[
"Apache-2.0"
] | null | null | null |
Course/functions/example_12.py
|
zevgenia/Python_shultais
|
e51c31de221c5e7f36ede857a960138009ec8a05
|
[
"Apache-2.0"
] | null | null | null |
#sum = 10
def func1():
#sum = 20
    print('Local 1:', sum)
def func2():
#sum = 30
print('Local 2:', sum)
func2()
func1()
print("Global:", sum([1, 2, 3]))
| 11.6875
| 32
| 0.459893
|
def func1():
    print('Local 1:', sum)
def func2():
print('Local 2:', sum)
func2()
func1()
print("Global:", sum([1, 2, 3]))
| true
| true
|
790c28763c9989b994ede509dbfb72af89e2c404
| 16,381
|
py
|
Python
|
chainer/training/updaters/multiprocess_parallel_updater.py
|
hikjik/chainer
|
324a1bc1ea3edd63d225e4a87ed0a36af7fd712f
|
[
"MIT"
] | 1
|
2019-03-09T07:39:07.000Z
|
2019-03-09T07:39:07.000Z
|
chainer/training/updaters/multiprocess_parallel_updater.py
|
hitsgub/chainer
|
20d4d70f5cdacc1f24f243443f5bebc2055c8f8e
|
[
"MIT"
] | null | null | null |
chainer/training/updaters/multiprocess_parallel_updater.py
|
hitsgub/chainer
|
20d4d70f5cdacc1f24f243443f5bebc2055c8f8e
|
[
"MIT"
] | null | null | null |
import multiprocessing
import warnings
import six
from chainer.backends import cuda
from chainer.dataset import convert
from chainer import reporter
from chainer.training.updaters import standard_updater
try:
from cupy.cuda import nccl
_available = True
except Exception:
_available = False
import numpy
class _Worker(multiprocessing.Process):
def __init__(self, proc_id, pipe, master):
super(_Worker, self).__init__()
self.proc_id = proc_id
self.pipe = pipe
self.converter = master.converter
self.model = master._master
self.device = master._devices[proc_id]
self.iterator = master._mpu_iterators[proc_id]
self.n_devices = len(master._devices)
def setup(self):
_, comm_id = self.pipe.recv()
self.comm = nccl.NcclCommunicator(self.n_devices, comm_id,
self.proc_id)
self.model.to_gpu(self.device)
self.reporter = reporter.Reporter()
self.reporter.add_observer('main', self.model)
self.reporter.add_observers('main',
self.model.namedlinks(skipself=True))
def run(self):
dev = cuda.Device(self.device)
dev.use()
self.setup()
while True:
job, data = self.pipe.recv()
if job == 'finalize':
dev.synchronize()
break
if job == 'update':
# For reducing memory
self.model.cleargrads()
batch = self.converter(self.iterator.next(), self.device)
with self.reporter.scope({}): # pass dummy observation
loss = _calc_loss(self.model, batch)
self.model.cleargrads()
loss.backward()
del loss
gg = gather_grads(self.model)
nccl_data_type = _get_nccl_data_type(gg.dtype)
null_stream = cuda.Stream.null
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM, 0,
null_stream.ptr)
del gg
self.model.cleargrads()
gp = gather_params(self.model)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0,
null_stream.ptr)
scatter_params(self.model, gp)
del gp
class MultiprocessParallelUpdater(standard_updater.StandardUpdater):
"""Implementation of a multiprocess parallel GPU Updater.
This is an implementation of :class:`Updater` that uses multiple GPUs
with multi-process data parallelism. It uses Nvidia NCCL for communication
between multiple GPUs.
It behaves similarly to
:class:`~chainer.training.updaters.StandardUpdater`.
The update routine is modified to support data-parallel
computation on multiple GPUs in one machine.
It is based on synchronous parallel SGD: it
parallelizes the gradient computation over a mini-batch, and updates the
parameters only in the main device.
It does not transfer the values collected by :class:`Reporter` in the sub
devices to the main device. So you can only see the reported values in
the main device.
Args:
        iterators: List of dataset iterators for the training dataset. The
            number of iterators must equal the number of GPUs you use.
optimizer: Optimizer to update parameters. The model should be attached
to the optimizer.
converter: Converter function to build input arrays. Each batch
extracted by the iterator is split equally between the devices and
then passed with corresponding ``device`` option to this function.
:func:`~chainer.dataset.concat_examples` is used by default.
devices: Dictionary or list of devices to which the training data is
sent. The master device will be the first one in the list or the
value attached to the key ``'main'``.
auto_new_epoch (bool): If ``True``,
:meth:`~chainer.Optimizer.new_epoch` of the main optimizer is
automatically called when the ``is_new_epoch`` attribute of the
main iterator is ``True``.
"""
def __init__(self, iterators, optimizer, converter=convert.concat_examples,
devices=None, auto_new_epoch=True):
if not MultiprocessParallelUpdater.available():
raise Exception(
'NCCL is not enabled. MultiprocessParallelUpdater '
'requires NCCL.\n'
'Please reinstall CuPy after you install NCCL.\n'
'(see https://docs-cupy.chainer.org/en/latest/install.html)')
try:
cuda.cupy.cuda.driver.ctxGetCurrent()
_cuda_initialized = True
except cuda.cupy.cuda.driver.CUDADriverError:
# The context is not initialized, it will be fine.
_cuda_initialized = False
if _cuda_initialized:
raise RuntimeError(
'The CUDA context has been already initialized. '
'MultiprocessParallelUpdater assumes the context is '
'uninitialized. Please do not call CUDA API before '
'MultiprocessParallelUpdater creates processes.')
assert len(iterators) == len(devices)
for iterator in iterators[1:]:
assert len(iterator.dataset) == len(iterators[0].dataset)
# Correct optimizer parameters for new minibatch size
optim = optimizer.__class__.__name__
if optim in ('Adam', 'AdaGrad', 'RMSprop'):
optimizer.eps *= len(devices)
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif optim in ('RMSpropGraves', 'AdaDelta'):
optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif hasattr(optimizer, 'lr'):
optimizer.lr /= len(devices)
warnings.warn('optimizer.lr is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.lr))
super(MultiprocessParallelUpdater, self).__init__(
iterator=iterators[0],
optimizer=optimizer,
converter=converter,
auto_new_epoch=auto_new_epoch,
)
if isinstance(devices, dict):
devices = devices.copy()
main = devices.pop('main')
devices = list(six.itervalues(devices))
devices = [main] + devices
elif isinstance(devices, (list, tuple)):
devices = list(devices)
else:
raise ValueError(
'devices argument should be either dict, list or tuple,'
' but {} was given.'.format(type(devices)))
if devices is None or any(device is None for device in devices):
raise ValueError('must specify GPU devices')
self._master = optimizer.target
self._devices = devices
self._mpu_iterators = iterators
self._initialized = False
self._pipes = []
self._workers = []
self.comm = None
@staticmethod
def available():
return _available
def _send_message(self, message):
for pipe in self._pipes:
pipe.send(message)
def setup_workers(self):
if self._initialized:
return
self._initialized = True
self._master.cleargrads()
for i in six.moves.range(1, len(self._devices)):
pipe, worker_end = multiprocessing.Pipe()
worker = _Worker(i, worker_end, self)
worker.start()
self._workers.append(worker)
self._pipes.append(pipe)
with cuda.Device(self._devices[0]):
self._master.to_gpu(self._devices[0])
if len(self._devices) > 1:
comm_id = nccl.get_unique_id()
self._send_message(('set comm_id', comm_id))
self.comm = nccl.NcclCommunicator(len(self._devices),
comm_id, 0)
def update_core(self):
self.setup_workers()
self._send_message(('update', None))
with cuda.Device(self._devices[0]):
# For reducing memory
self._master.cleargrads()
optimizer = self.get_optimizer('main')
iterator = self.get_iterator('main')
batch = iterator.next()
batch = self.converter(batch, self._devices[0])
loss = _calc_loss(self._master, batch)
self._master.cleargrads()
loss.backward()
# NCCL: reduce grads
null_stream = cuda.Stream.null
if self.comm is not None:
gg = gather_grads(self._master)
nccl_data_type = _get_nccl_data_type(gg.dtype)
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM,
0, null_stream.ptr)
scatter_grads(self._master, gg)
del gg
optimizer.update()
if self.comm is not None:
gp = gather_params(self._master)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type,
0, null_stream.ptr)
if self.auto_new_epoch and iterator.is_new_epoch:
optimizer.new_epoch(auto=True)
def finalize(self):
self._send_message(('finalize', None))
for worker in self._workers:
worker.join()
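# --- Editor's note: hypothetical construction sketch for the updater above, ---
# --- loosely following Chainer's data-parallel MNIST example.               ---
def _example_build_updater(model, train_dataset):
    import chainer
    # One iterator per GPU over equally sized shards; the device mapped to
    # 'main' holds the master copy of the parameters.
    train_iters = [
        chainer.iterators.MultiprocessIterator(shard, batch_size=32)
        for shard in chainer.datasets.split_dataset_n_random(train_dataset, 2)
    ]
    optimizer = chainer.optimizers.MomentumSGD(lr=0.01)
    optimizer.setup(model)
    return MultiprocessParallelUpdater(
        train_iters, optimizer, devices={'main': 0, 'second': 1})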
def _calc_loss(model, in_arrays):
if isinstance(in_arrays, tuple):
return model(*in_arrays)
elif isinstance(in_arrays, dict):
return model(**in_arrays)
else:
return model(in_arrays)
def size_num_grads(link):
"""Count total size of all gradient arrays of a given link
Args:
link (chainer.link.Link): Target link object.
"""
size = 0
num = 0
for param in link.params():
if param.size == 0:
continue
size += param.size
num += 1
return size, num
def _memcpy_gather():
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info',
'raw float32 dst',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_dst = i;
int i_src = i;
if (id > 0) i_src -= info[id];
dst[i_dst] = 0;
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *src = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = src[i_src];
}
else { // fp16
float16 *src = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float>(src[i_src]);
}
}
id_pre = id;
''',
'_memcpy_gather',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _gather(link, target):
size, num = size_num_grads(link)
ptrs = numpy.empty(num, dtype=numpy.uint64)
dtypes = numpy.empty(num, dtype=numpy.int8)
info = numpy.empty(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is not None:
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_gather()(ptrs, dtypes, info, size=size)
def gather_grads(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('gather_grads works only on GPU.')
return _gather(link, 'grad')
def gather_params(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('Link.gather_params works only on GPU.')
return _gather(link, 'data')
def _memcpy_scatter():
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info, raw float32 array',
'',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_src = i;
int i_dst = i;
if (id > 0) i_dst -= info[id];
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *dst = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = array[i_src];
}
else { // fp16
float16 *dst = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float16>(array[i_src]);
}
}
id_pre = id;
''',
'_memcpy_scatter',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _scatter(link, array, target):
size, num = size_num_grads(link)
ptrs = numpy.zeros(num, dtype=numpy.uint64)
dtypes = numpy.zeros(num, dtype=numpy.int8)
info = numpy.zeros(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is None:
d = cuda.cupy.zeros(param.shape, dtype=param.dtype)
setattr(param, target, d)
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
if i != num:
        raise RuntimeError('the number of collected parameters does not match')
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_scatter()(ptrs, dtypes, info, array, size=size)
def scatter_grads(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_grads()
"""
return _scatter(link, array, 'grad')
def scatter_params(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_params()
"""
return _scatter(link, array, 'data')
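# --- Editor's note: hypothetical sketch of the gather/scatter round trip  ---
# --- above; requires a link whose parameters live on a GPU (CuPy arrays). ---
def _example_gather_scatter_roundtrip(link):
    flat = gather_grads(link)   # one fused float32 array of every gradient
    # ... an NCCL reduce would act on flat.data.ptr between these two calls ...
    scatter_grads(link, flat)   # write the (possibly reduced) values back
    return flat.size == size_num_grads(link)[0]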
def _get_nccl_data_type(dtype):
"""Get data type for NCCL"""
if dtype == numpy.float32:
nccl_data_type = nccl.NCCL_FLOAT
elif dtype == numpy.float16:
nccl_data_type = nccl.NCCL_HALF
elif dtype == numpy.float64:
nccl_data_type = nccl.NCCL_DOUBLE
else:
raise RuntimeError('Unexpected data type:{}'.format(dtype))
return nccl_data_type
| 33.567623
| 79
| 0.561687
|
import multiprocessing
import warnings
import six
from chainer.backends import cuda
from chainer.dataset import convert
from chainer import reporter
from chainer.training.updaters import standard_updater
try:
from cupy.cuda import nccl
_available = True
except Exception:
_available = False
import numpy
class _Worker(multiprocessing.Process):
def __init__(self, proc_id, pipe, master):
super(_Worker, self).__init__()
self.proc_id = proc_id
self.pipe = pipe
self.converter = master.converter
self.model = master._master
self.device = master._devices[proc_id]
self.iterator = master._mpu_iterators[proc_id]
self.n_devices = len(master._devices)
def setup(self):
_, comm_id = self.pipe.recv()
self.comm = nccl.NcclCommunicator(self.n_devices, comm_id,
self.proc_id)
self.model.to_gpu(self.device)
self.reporter = reporter.Reporter()
self.reporter.add_observer('main', self.model)
self.reporter.add_observers('main',
self.model.namedlinks(skipself=True))
def run(self):
dev = cuda.Device(self.device)
dev.use()
self.setup()
while True:
job, data = self.pipe.recv()
if job == 'finalize':
dev.synchronize()
break
if job == 'update':
self.model.cleargrads()
batch = self.converter(self.iterator.next(), self.device)
with self.reporter.scope({}):
loss = _calc_loss(self.model, batch)
self.model.cleargrads()
loss.backward()
del loss
gg = gather_grads(self.model)
nccl_data_type = _get_nccl_data_type(gg.dtype)
null_stream = cuda.Stream.null
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM, 0,
null_stream.ptr)
del gg
self.model.cleargrads()
gp = gather_params(self.model)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0,
null_stream.ptr)
scatter_params(self.model, gp)
del gp
class MultiprocessParallelUpdater(standard_updater.StandardUpdater):
def __init__(self, iterators, optimizer, converter=convert.concat_examples,
devices=None, auto_new_epoch=True):
if not MultiprocessParallelUpdater.available():
raise Exception(
'NCCL is not enabled. MultiprocessParallelUpdater '
'requires NCCL.\n'
'Please reinstall CuPy after you install NCCL.\n'
'(see https://docs-cupy.chainer.org/en/latest/install.html)')
try:
cuda.cupy.cuda.driver.ctxGetCurrent()
_cuda_initialized = True
except cuda.cupy.cuda.driver.CUDADriverError:
_cuda_initialized = False
if _cuda_initialized:
raise RuntimeError(
'The CUDA context has been already initialized. '
'MultiprocessParallelUpdater assumes the context is '
'uninitialized. Please do not call CUDA API before '
'MultiprocessParallelUpdater creates processes.')
assert len(iterators) == len(devices)
for iterator in iterators[1:]:
assert len(iterator.dataset) == len(iterators[0].dataset)
optim = optimizer.__class__.__name__
if optim in ('Adam', 'AdaGrad', 'RMSprop'):
optimizer.eps *= len(devices)
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif optim in ('RMSpropGraves', 'AdaDelta'):
optimizer.eps *= len(devices) ** 2
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif hasattr(optimizer, 'lr'):
optimizer.lr /= len(devices)
warnings.warn('optimizer.lr is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.lr))
super(MultiprocessParallelUpdater, self).__init__(
iterator=iterators[0],
optimizer=optimizer,
converter=converter,
auto_new_epoch=auto_new_epoch,
)
if isinstance(devices, dict):
devices = devices.copy()
main = devices.pop('main')
devices = list(six.itervalues(devices))
devices = [main] + devices
elif isinstance(devices, (list, tuple)):
devices = list(devices)
else:
raise ValueError(
'devices argument should be either dict, list or tuple,'
' but {} was given.'.format(type(devices)))
if devices is None or any(device is None for device in devices):
raise ValueError('must specify GPU devices')
self._master = optimizer.target
self._devices = devices
self._mpu_iterators = iterators
self._initialized = False
self._pipes = []
self._workers = []
self.comm = None
@staticmethod
def available():
return _available
def _send_message(self, message):
for pipe in self._pipes:
pipe.send(message)
def setup_workers(self):
if self._initialized:
return
self._initialized = True
self._master.cleargrads()
for i in six.moves.range(1, len(self._devices)):
pipe, worker_end = multiprocessing.Pipe()
worker = _Worker(i, worker_end, self)
worker.start()
self._workers.append(worker)
self._pipes.append(pipe)
with cuda.Device(self._devices[0]):
self._master.to_gpu(self._devices[0])
if len(self._devices) > 1:
comm_id = nccl.get_unique_id()
self._send_message(('set comm_id', comm_id))
self.comm = nccl.NcclCommunicator(len(self._devices),
comm_id, 0)
def update_core(self):
self.setup_workers()
self._send_message(('update', None))
with cuda.Device(self._devices[0]):
self._master.cleargrads()
optimizer = self.get_optimizer('main')
iterator = self.get_iterator('main')
batch = iterator.next()
batch = self.converter(batch, self._devices[0])
loss = _calc_loss(self._master, batch)
self._master.cleargrads()
loss.backward()
null_stream = cuda.Stream.null
if self.comm is not None:
gg = gather_grads(self._master)
nccl_data_type = _get_nccl_data_type(gg.dtype)
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM,
0, null_stream.ptr)
scatter_grads(self._master, gg)
del gg
optimizer.update()
if self.comm is not None:
gp = gather_params(self._master)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type,
0, null_stream.ptr)
if self.auto_new_epoch and iterator.is_new_epoch:
optimizer.new_epoch(auto=True)
def finalize(self):
self._send_message(('finalize', None))
for worker in self._workers:
worker.join()
def _calc_loss(model, in_arrays):
if isinstance(in_arrays, tuple):
return model(*in_arrays)
elif isinstance(in_arrays, dict):
return model(**in_arrays)
else:
return model(in_arrays)
def size_num_grads(link):
size = 0
num = 0
for param in link.params():
if param.size == 0:
continue
size += param.size
num += 1
return size, num
def _memcpy_gather():
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info',
'raw float32 dst',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_dst = i;
int i_src = i;
if (id > 0) i_src -= info[id];
dst[i_dst] = 0;
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *src = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = src[i_src];
}
else { // fp16
float16 *src = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float>(src[i_src]);
}
}
id_pre = id;
''',
'_memcpy_gather',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _gather(link, target):
size, num = size_num_grads(link)
ptrs = numpy.empty(num, dtype=numpy.uint64)
dtypes = numpy.empty(num, dtype=numpy.int8)
info = numpy.empty(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0
d = getattr(param, target)
if d is not None:
ptrs[i] = d.data.ptr
dtypes[i] = 0
if param.dtype == numpy.float16:
dtypes[i] = 1
info[i + 1] = info[i] + param.size
i += 1
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_gather()(ptrs, dtypes, info, size=size)
def gather_grads(link):
if link.xp is numpy:
raise RuntimeError('gather_grads works only on GPU.')
return _gather(link, 'grad')
def gather_params(link):
if link.xp is numpy:
raise RuntimeError('Link.gather_params works only on GPU.')
return _gather(link, 'data')
def _memcpy_scatter():
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info, raw float32 array',
'',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_src = i;
int i_dst = i;
if (id > 0) i_dst -= info[id];
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *dst = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = array[i_src];
}
else { // fp16
float16 *dst = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float16>(array[i_src]);
}
}
id_pre = id;
''',
'_memcpy_scatter',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _scatter(link, array, target):
size, num = size_num_grads(link)
ptrs = numpy.zeros(num, dtype=numpy.uint64)
dtypes = numpy.zeros(num, dtype=numpy.int8)
info = numpy.zeros(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0
d = getattr(param, target)
if d is None:
d = cuda.cupy.zeros(param.shape, dtype=param.dtype)
setattr(param, target, d)
ptrs[i] = d.data.ptr
dtypes[i] = 0
if param.dtype == numpy.float16:
dtypes[i] = 1
info[i + 1] = info[i] + param.size
i += 1
if i != num:
        raise RuntimeError('the number of collected parameters does not match')
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_scatter()(ptrs, dtypes, info, array, size=size)
def scatter_grads(link, array):
return _scatter(link, array, 'grad')
def scatter_params(link, array):
return _scatter(link, array, 'data')
def _get_nccl_data_type(dtype):
if dtype == numpy.float32:
nccl_data_type = nccl.NCCL_FLOAT
elif dtype == numpy.float16:
nccl_data_type = nccl.NCCL_HALF
elif dtype == numpy.float64:
nccl_data_type = nccl.NCCL_DOUBLE
else:
raise RuntimeError('Unexpected data type:{}'.format(dtype))
return nccl_data_type
| true
| true
|
790c28b55d314170ea807611560eda7af16dbf0b
| 13,628
|
py
|
Python
|
deutscheflash.py
|
n-Holmes/deutscheflash
|
1f974a3fffe771c0e552fa40123b27fa3f24674f
|
[
"MIT"
] | null | null | null |
deutscheflash.py
|
n-Holmes/deutscheflash
|
1f974a3fffe771c0e552fa40123b27fa3f24674f
|
[
"MIT"
] | null | null | null |
deutscheflash.py
|
n-Holmes/deutscheflash
|
1f974a3fffe771c0e552fa40123b27fa3f24674f
|
[
"MIT"
] | null | null | null |
"""A simple CLI app to practice grammatical genders of German nouns."""
import argparse
import json
import pathlib
import pandas as pd
class WordList:
"""Data structure to store a pandas dataframe and some structural details.
Args:
path (pathlib.Path or None): The path (without suffix) to a wordlist.
If there is no current list at the path, will create a new list.
If no path is provided the WordList will not be fully initialized and will
require a subsequent call of `load` or `new`.
"""
def __init__(self, path=None):
self.words = None
self.structure = {}
if path is not None:
self.load(path)
def load(self, path: pathlib.Path):
"""Load stored data."""
try:
self.words = pd.read_csv(path.with_suffix(".csv"))
with path.with_suffix(".json").open() as f:
self.structure = json.loads(f.read())
self.words.set_index(self.structure["index"], inplace=True)
except FileNotFoundError as exception:
raise FileNotFoundError(
"No word list found with the specified name."
) from exception
def new(self, language: str = "german", score_inertia: int = 2):
"""Create a new wordlist.
Args:
language (str): The name of a language in the GENDERS dictionary.
score_inertia (int): Determines how resistant scores are to change.
Must be a positive integer. Higher values will require more consecutive
correct answers to reduce the frequency of a specific word.
"""
gender_options = get_languages()
try:
genders = gender_options[language]
except KeyError as exception:
raise ValueError(f"Unknown language: {language}") from exception
columns = ["Word", "Gender", "Correct", "Wrong", "Weight"]
self.structure = {
"language": language,
"genders": genders,
"aliases": self._get_aliases(genders),
"default guesses": score_inertia,
"index": "Word",
"column count": 3,
}
self.words = pd.DataFrame(columns=columns)
self.words.set_index(self.structure["index"], inplace=True)
def save(self, path: pathlib.Path):
"""Saves words to a .csv file and structure to a .json."""
self.words.to_csv(path.with_suffix(".csv"))
with path.with_suffix(".json").open(mode="w") as f:
f.write(json.dumps(self.structure))
def format_gender(self, gender_string: str):
"""Attempts to find a matching gender for gender_string.
Args:
gender_string (str): A gender for the word list or an alias of a gender.
Returns:
The associated gender.
Raises:
ValueError: `gender_string` does not match any gender or alias.
"""
gender_string = gender_string.lower()
if gender_string in self.structure["genders"]:
return gender_string
if gender_string in self.structure["aliases"]:
return self.structure["aliases"][gender_string]
raise ValueError(f"Unknown gender: {gender_string}")
def add(self, gender: str, word: str):
"""Add a new word to the list.
Args:
gender (str): The gender of the word being added.
word (str): The word to add.
Raises:
ValueError: `gender` does not match the current wordlist or the word is
already present in the list.
"""
gender = self.format_gender(gender)
word = word.capitalize()
if gender not in self.structure["genders"]:
raise ValueError(
f"{gender} is not a valid gender for the current wordlist."
)
if word in self.words.index:
raise ValueError(f"{word} is already included.")
n_genders = len(self.structure["genders"])
row = [
gender,
self.structure["default guesses"],
self.structure["default guesses"] * (n_genders - 1),
(n_genders - 1) / n_genders,
]
self.words.loc[word] = row
def get_words(self, n: int, distribution: str = "weighted"):
"""Selects and returns a sample of words and their genders.
Args:
n (int): The number of results wanted.
distribution (str): The sampling method to use. Either `uniform` or
`weighted`.
Yields:
A tuple of strings in the format (word, gender).
"""
if distribution == "uniform":
sample = self.words.sample(n=n)
elif distribution == "weighted":
sample = self.words.sample(n=n, weights="Weight")
else:
raise ValueError(f"Unknown value for distribution: {distribution}")
for row in sample.iterrows():
yield row[0], row[1].Gender
def update_weight(self, word, guess):
"""Update the weighting on a word based on the most recent guess.
Args:
word (str): The word to update. Should be in the index of self.words.
guess (bool): Whether the guess was correct or not.
"""
row = self.words.loc[word]
if guess:
row.Correct += 1
else:
row.Wrong += 1
n_genders = len(self.structure["genders"])
total = row.Correct + row.Wrong
if not total % n_genders:
# Throw away some data as evenly as possible to allow for change over time
            # Never throw away the last negative result, to avoid the question being lost.
if row.Correct:
wrongs_to_throw = min(row.Wrong - 1, n_genders - 1)
row.Wrong -= wrongs_to_throw
row.Correct -= n_genders - wrongs_to_throw
else:
                row.Wrong -= n_genders
row.Weight = row.Wrong / (row.Correct + row.Wrong)
self.words.loc[word] = row
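    # Editor's note, a worked example with hypothetical numbers: with three
    # genders and score_inertia=2, a fresh word starts at Correct=2, Wrong=4,
    # Weight=2/3. One correct guess makes the total 7 (not a multiple of 3),
    # so nothing is discarded and the weight drops to 4/7; once the total hits
    # a multiple of 3, old results are discarded so the weight can keep moving.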
@staticmethod
def _get_aliases(genders: dict):
"""Create a dictionary of aliases and the genders they refer to.
May have issues if multiple genders have the same article or first letter.
"""
aliases = {}
for gender, article in genders.items():
aliases[gender[0]] = gender
aliases[article] = gender
return aliases
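# --- Editor's note: a hypothetical usage sketch for WordList above; assumes ---
# --- genders.json defines a "german" entry whose genders or articles make   ---
# --- "m" a valid alias.                                                     ---
def _example_wordlist_roundtrip(tmp_dir):
    path = pathlib.Path(tmp_dir, "demo")
    words = WordList()
    words.new(language="german")
    words.add("m", "Tisch")  # the gender is resolved through the alias table
    words.save(path)         # writes demo.csv and demo.json
    return WordList(path).words.loc["Tisch"].Gender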
def force_console_input(
query: str,
allowable,
onfail: str = "Input not recognised, please try again.\n",
case_sensitive=False,
):
"""Get an input from the user matching some string in allowable.
Args:
query (str): The query to issue the user with.
allowable (str or container): The options which the user is allowed to submit.
If this is a string, acceptable answers will be substrings.
For containers acceptable answers will be elements of the container.
Returns:
        The accepted input string.
Raises:
IOError: A request to quit was submitted.
"""
if not allowable:
raise ValueError("At least one entry must be allowable.")
submission = input(query)
while True:
if not case_sensitive:
submission = submission.lower()
if submission in ("quit", "exit"):
raise IOError("Exit command received.")
if submission in allowable:
return submission
submission = input(onfail)
def get_languages():
"""Gets the language: genders dictionary."""
with open("genders.json", "r") as f:
return json.loads(f.read())
def main():
"""Orchestration function for the CLI."""
args = _parse_args()
path = pathlib.Path("lists", args.words)
try:
words = _load_words(path)
except IOError:
print("Exiting.")
return
if args.quiz_length is not None:
if args.quiz_length == 0:
print("Starting quiz in endless mode. Answer `quit` to end the quiz.")
correct, answered = _quiz_endless(words)
elif args.quiz_length > 0:
print(f"Starting quiz with length {args.quiz_length}...\n")
correct, answered, _ = _quiz(words, args.quiz_length)
else:
raise ValueError(f"Invalid quiz length: {args.quiz_length}.")
print(f"\nYou successfully answered {correct} out of {answered} questions!")
elif args.add_words:
print("Entering word addition mode...")
_add_words(words)
elif args.load_words:
print(f"Importing word file {args.load_words}...")
added, reps = _import_words(words, args.load_words)
print(f"{added} words successfully imported. {reps} duplicates skipped.")
elif args.reset_scores:
print("Resetting scores")
words = WordList()
words.new()
_import_words(words, path.with_suffix(".csv"))
_save_and_exit(words, path)
def _parse_args():
parser = argparse.ArgumentParser(
description="Flashcard app for German grammatical genders."
)
mode = parser.add_mutually_exclusive_group(required=True)
mode.add_argument(
"-q", "--quiz", type=int, help="Start the app in quiz mode.", dest="quiz_length"
)
mode.add_argument(
"-a",
"--add-words",
action="store_true",
help="Start the app in manual word addition mode.",
)
mode.add_argument(
"-l",
"--load-words",
help="Concatenates a prewritten list of words into the saved WordList.",
)
mode.add_argument(
"-r",
"--reset-scores",
action="store_true",
help="Reset all scores in the specified word list.",
)
parser.add_argument(
"-w", "--words", default="main_list", help="The name of the WordList to use."
)
return parser.parse_args()
def _load_words(path):
"""Encapsulates the loading/newfile creation logic."""
try:
words = WordList(path)
print("Words successfully loaded.")
except FileNotFoundError:
print(f"No word list found with given name.")
newfile = force_console_input(
"Would you like to create a new wordlist with the specified name? Y/N: ",
options=["y", "yes", "n", "no"],
)
if newfile[0] == "y":
words = WordList()
language = force_console_input(
query="Which language should be used?\n",
onfail="Language not recognised, please try again or check genders.json\n",
                allowable=get_languages(),
)
words.new(language=language)
print(f"New WordList for language {language} successfully created.")
else:
raise IOError
return words
def _quiz(wordlist, quiz_length):
"""Runs a command line quiz of the specified length."""
pd.options.mode.chained_assignment = None # Suppresses SettingWithCopyWarning
answered, correct = 0, 0
for word, gender in wordlist.get_words(quiz_length):
guess = input(f"What is the gender of {word}? ").lower()
if guess in ("quit", "exit"):
break
answered += 1
try:
guess = wordlist.format_gender(guess)
except ValueError:
print("Unrecognised guess, skipping.\n")
continue
accurate = gender == guess
wordlist.update_weight(word, accurate)
if accurate:
print("Correct!\n")
correct += 1
else:
print(f"Incorrect! The correct gender is {gender}.\n")
return correct, answered, answered == quiz_length
def _quiz_endless(wordlist):
"""Runs quizzes in batches of 20 until quit or exit is answered."""
correct, answered = 0, 0
finished = False
while not finished:
results = _quiz(wordlist, 20)
correct += results[0]
answered += results[1]
finished = not results[2]
return correct, answered
def _add_words(wordlist):
"""CLI for adding words individually to the wordlist."""
print("Type a word with gender eg `m Mann` or `quit` when finished.")
while True:
input_str = input()
if input_str in ("quit", "exit"):
print("Exiting word addition mode...")
break
try:
gender, word = input_str.split()
wordlist.add(gender, word)
except ValueError as e:
print(e)
def _import_words(wordlist, import_path):
"""Loads words from a csv file at import_path into `wordlist`."""
new_words = pd.read_csv(import_path)
words_added = 0
repetitions = 0
for _, row in new_words.iterrows():
try:
wordlist.add(row.Gender, row.Word)
words_added += 1
except ValueError:
repetitions += 1
return words_added, repetitions
def _save_and_exit(wordlist, path):
while True:
try:
wordlist.save(path=path)
# TODO: Can WordList be made into a context manager?
print("WordList successfully saved, goodbye!")
break
except PermissionError:
print("PermissionError! File may be open in another window.")
retry = force_console_input("Try again? Y/N: ", ["y", "yes", "n", "no"])
if retry[0] == "y":
continue
else:
print("Exiting without saving changes.")
if __name__ == "__main__":
main()
| 32.370546
| 91
| 0.587981
|
import argparse
import json
import pathlib
import pandas as pd
class WordList:
def __init__(self, path=None):
self.words = None
self.structure = {}
if path is not None:
self.load(path)
def load(self, path: pathlib.Path):
try:
self.words = pd.read_csv(path.with_suffix(".csv"))
with path.with_suffix(".json").open() as f:
self.structure = json.loads(f.read())
self.words.set_index(self.structure["index"], inplace=True)
except FileNotFoundError as exception:
raise FileNotFoundError(
"No word list found with the specified name."
) from exception
def new(self, language: str = "german", score_inertia: int = 2):
gender_options = get_languages()
try:
genders = gender_options[language]
except KeyError as exception:
raise ValueError(f"Unknown language: {language}") from exception
columns = ["Word", "Gender", "Correct", "Wrong", "Weight"]
self.structure = {
"language": language,
"genders": genders,
"aliases": self._get_aliases(genders),
"default guesses": score_inertia,
"index": "Word",
"column count": 3,
}
self.words = pd.DataFrame(columns=columns)
self.words.set_index(self.structure["index"], inplace=True)
def save(self, path: pathlib.Path):
self.words.to_csv(path.with_suffix(".csv"))
with path.with_suffix(".json").open(mode="w") as f:
f.write(json.dumps(self.structure))
def format_gender(self, gender_string: str):
gender_string = gender_string.lower()
if gender_string in self.structure["genders"]:
return gender_string
if gender_string in self.structure["aliases"]:
return self.structure["aliases"][gender_string]
raise ValueError(f"Unknown gender: {gender_string}")
def add(self, gender: str, word: str):
gender = self.format_gender(gender)
word = word.capitalize()
if gender not in self.structure["genders"]:
raise ValueError(
f"{gender} is not a valid gender for the current wordlist."
)
if word in self.words.index:
raise ValueError(f"{word} is already included.")
n_genders = len(self.structure["genders"])
row = [
gender,
self.structure["default guesses"],
self.structure["default guesses"] * (n_genders - 1),
(n_genders - 1) / n_genders,
]
self.words.loc[word] = row
def get_words(self, n: int, distribution: str = "weighted"):
if distribution == "uniform":
sample = self.words.sample(n=n)
elif distribution == "weighted":
sample = self.words.sample(n=n, weights="Weight")
else:
raise ValueError(f"Unknown value for distribution: {distribution}")
for row in sample.iterrows():
yield row[0], row[1].Gender
def update_weight(self, word, guess):
row = self.words.loc[word]
if guess:
row.Correct += 1
else:
row.Wrong += 1
n_genders = len(self.structure["genders"])
total = row.Correct + row.Wrong
if not total % n_genders:
if row.Correct:
wrongs_to_throw = min(row.Wrong - 1, n_genders - 1)
row.Wrong -= wrongs_to_throw
row.Correct -= n_genders - wrongs_to_throw
else:
                row.Wrong -= n_genders
row.Weight = row.Wrong / (row.Correct + row.Wrong)
self.words.loc[word] = row
@staticmethod
def _get_aliases(genders: dict):
aliases = {}
for gender, article in genders.items():
aliases[gender[0]] = gender
aliases[article] = gender
return aliases
def force_console_input(
query: str,
allowable,
onfail: str = "Input not recognised, please try again.\n",
case_sensitive=False,
):
if not allowable:
raise ValueError("At least one entry must be allowable.")
submission = input(query)
while True:
if not case_sensitive:
submission = submission.lower()
if submission in ("quit", "exit"):
raise IOError("Exit command received.")
if submission in allowable:
return submission
submission = input(onfail)
def get_languages():
with open("genders.json", "r") as f:
return json.loads(f.read())
def main():
args = _parse_args()
path = pathlib.Path("lists", args.words)
try:
words = _load_words(path)
except IOError:
print("Exiting.")
return
if args.quiz_length is not None:
if args.quiz_length == 0:
print("Starting quiz in endless mode. Answer `quit` to end the quiz.")
correct, answered = _quiz_endless(words)
elif args.quiz_length > 0:
print(f"Starting quiz with length {args.quiz_length}...\n")
correct, answered, _ = _quiz(words, args.quiz_length)
else:
raise ValueError(f"Invalid quiz length: {args.quiz_length}.")
print(f"\nYou successfully answered {correct} out of {answered} questions!")
elif args.add_words:
print("Entering word addition mode...")
_add_words(words)
elif args.load_words:
print(f"Importing word file {args.load_words}...")
added, reps = _import_words(words, args.load_words)
print(f"{added} words successfully imported. {reps} duplicates skipped.")
elif args.reset_scores:
print("Resetting scores")
words = WordList()
words.new()
_import_words(words, path.with_suffix(".csv"))
_save_and_exit(words, path)
def _parse_args():
parser = argparse.ArgumentParser(
description="Flashcard app for German grammatical genders."
)
mode = parser.add_mutually_exclusive_group(required=True)
mode.add_argument(
"-q", "--quiz", type=int, help="Start the app in quiz mode.", dest="quiz_length"
)
mode.add_argument(
"-a",
"--add-words",
action="store_true",
help="Start the app in manual word addition mode.",
)
mode.add_argument(
"-l",
"--load-words",
help="Concatenates a prewritten list of words into the saved WordList.",
)
mode.add_argument(
"-r",
"--reset-scores",
action="store_true",
help="Reset all scores in the specified word list.",
)
parser.add_argument(
"-w", "--words", default="main_list", help="The name of the WordList to use."
)
return parser.parse_args()
def _load_words(path):
try:
words = WordList(path)
print("Words successfully loaded.")
except FileNotFoundError:
print(f"No word list found with given name.")
newfile = force_console_input(
"Would you like to create a new wordlist with the specified name? Y/N: ",
options=["y", "yes", "n", "no"],
)
if newfile[0] == "y":
words = WordList()
language = force_console_input(
query="Which language should be used?\n",
onfail="Language not recognised, please try again or check genders.json\n",
                allowable=get_languages(),
)
words.new(language=language)
print(f"New WordList for language {language} successfully created.")
else:
raise IOError
return words
def _quiz(wordlist, quiz_length):
pd.options.mode.chained_assignment = None
answered, correct = 0, 0
for word, gender in wordlist.get_words(quiz_length):
guess = input(f"What is the gender of {word}? ").lower()
if guess in ("quit", "exit"):
break
answered += 1
try:
guess = wordlist.format_gender(guess)
except ValueError:
print("Unrecognised guess, skipping.\n")
continue
accurate = gender == guess
wordlist.update_weight(word, accurate)
if accurate:
print("Correct!\n")
correct += 1
else:
print(f"Incorrect! The correct gender is {gender}.\n")
return correct, answered, answered == quiz_length
def _quiz_endless(wordlist):
correct, answered = 0, 0
finished = False
while not finished:
results = _quiz(wordlist, 20)
correct += results[0]
answered += results[1]
finished = not results[2]
return correct, answered
def _add_words(wordlist):
print("Type a word with gender eg `m Mann` or `quit` when finished.")
while True:
input_str = input()
if input_str in ("quit", "exit"):
print("Exiting word addition mode...")
break
try:
gender, word = input_str.split()
wordlist.add(gender, word)
except ValueError as e:
print(e)
def _import_words(wordlist, import_path):
new_words = pd.read_csv(import_path)
words_added = 0
repetitions = 0
for _, row in new_words.iterrows():
try:
wordlist.add(row.Gender, row.Word)
words_added += 1
except ValueError:
repetitions += 1
return words_added, repetitions
def _save_and_exit(wordlist, path):
while True:
try:
wordlist.save(path=path)
print("WordList successfully saved, goodbye!")
break
except PermissionError:
print("PermissionError! File may be open in another window.")
retry = force_console_input("Try again? Y/N: ", ["y", "yes", "n", "no"])
if retry[0] == "y":
continue
else:
print("Exiting without saving changes.")
if __name__ == "__main__":
main()
| true
| true
|
790c29059bb91b3194999732eef18e0f92d55ef4
| 151
|
py
|
Python
|
figaro/__init__.py
|
rylans/figaro
|
99c4eb31d402d30dcb266e3da123edf698e979f5
|
[
"Apache-2.0"
] | null | null | null |
figaro/__init__.py
|
rylans/figaro
|
99c4eb31d402d30dcb266e3da123edf698e979f5
|
[
"Apache-2.0"
] | null | null | null |
figaro/__init__.py
|
rylans/figaro
|
99c4eb31d402d30dcb266e3da123edf698e979f5
|
[
"Apache-2.0"
] | null | null | null |
from .agent import Figaro
from .handlers.arithmetichandler import ArithmeticHandler
from .handlers.elizastatementhandler import ElizaStatementHandler
| 30.2
| 65
| 0.880795
|
from .agent import Figaro
from .handlers.arithmetichandler import ArithmeticHandler
from .handlers.elizastatementhandler import ElizaStatementHandler
| true
| true
|
790c2921c88db0e67e8f59a8ac4aaa322b1c55f7
| 32,147
|
py
|
Python
|
platipy/imaging/projects/cardiac/run.py
|
RadiotherapyAI/platipy
|
53294789a3805ea088c9953027f4ab09a614f052
|
[
"Apache-2.0"
] | null | null | null |
platipy/imaging/projects/cardiac/run.py
|
RadiotherapyAI/platipy
|
53294789a3805ea088c9953027f4ab09a614f052
|
[
"Apache-2.0"
] | null | null | null |
platipy/imaging/projects/cardiac/run.py
|
RadiotherapyAI/platipy
|
53294789a3805ea088c9953027f4ab09a614f052
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import SimpleITK as sitk
import numpy as np
from loguru import logger
from platipy.imaging.registration.utils import apply_transform, convert_mask_to_reg_structure
from platipy.imaging.registration.linear import (
linear_registration,
)
from platipy.imaging.registration.deformable import (
fast_symmetric_forces_demons_registration,
)
from platipy.imaging.label.fusion import (
process_probability_image,
compute_weight_map,
combine_labels,
)
from platipy.imaging.label.iar import run_iar
from platipy.imaging.utils.vessel import vessel_spline_generation
from platipy.imaging.utils.valve import (
generate_valve_from_great_vessel,
generate_valve_using_cylinder,
)
from platipy.imaging.utils.conduction import (
geometric_sinoatrialnode,
geometric_atrioventricularnode,
)
from platipy.imaging.utils.crop import label_to_roi, crop_to_roi
from platipy.imaging.generation.mask import extend_mask
from platipy.imaging.label.utils import binary_encode_structure_list, correct_volume_overlap
ATLAS_PATH = "/atlas"
if "ATLAS_PATH" in os.environ:
ATLAS_PATH = os.environ["ATLAS_PATH"]
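# The atlas directory layout implied by the format strings below is, for example:
#   {ATLAS_PATH}/Case_03/Images/Case_03_CROP.nii.gz
#   {ATLAS_PATH}/Case_03/Structures/Case_03_LEFTVENTRICLE_CROP.nii.gz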
CARDIAC_SETTINGS_DEFAULTS = {
"atlas_settings": {
"atlas_id_list": [
"03",
"05",
"08",
"10",
"11",
"12",
"13",
"16",
"24",
"35",
],
"atlas_structure_list": [
"AORTICVALVE",
"ASCENDINGAORTA",
"LANTDESCARTERY",
"LCIRCUMFLEXARTERY",
"LCORONARYARTERY",
"LEFTATRIUM",
"LEFTVENTRICLE",
"MITRALVALVE",
"PULMONARYARTERY",
"PULMONICVALVE",
"RCORONARYARTERY",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"SVC",
"TRICUSPIDVALVE",
"WHOLEHEART",
],
"atlas_path": ATLAS_PATH,
"atlas_image_format": "Case_{0}/Images/Case_{0}_CROP.nii.gz",
"atlas_label_format": "Case_{0}/Structures/Case_{0}_{1}_CROP.nii.gz",
"crop_atlas_to_structures": False,
"crop_atlas_expansion_mm": (20, 20, 40),
"guide_structure_name": "WHOLEHEART",
"superior_extension": 30,
},
"auto_crop_target_image_settings": {
"expansion_mm": [20, 20, 40],
},
"linear_registration_settings": {
"reg_method": "affine",
"shrink_factors": [16, 8, 4],
"smooth_sigmas": [0, 0, 0],
"sampling_rate": 0.75,
"default_value": -1000,
"number_of_iterations": 50,
"metric": "mean_squares",
"optimiser": "gradient_descent_line_search",
"verbose": False,
},
"structure_guided_registration_settings": {
"isotropic_resample": True,
"resolution_staging": [
16,
8,
2,
], # specify voxel size (mm) since isotropic_resample is set
"iteration_staging": [50, 50, 50],
"smoothing_sigmas": [0, 0, 0],
"ncores": 8,
"default_value": 0,
"verbose": False,
},
"deformable_registration_settings": {
"isotropic_resample": True,
"resolution_staging": [
6,
3,
1.5,
], # specify voxel size (mm) since isotropic_resample is set
"iteration_staging": [200, 150, 100],
"smoothing_sigmas": [0, 0, 0],
"ncores": 8,
"default_value": 0,
"verbose": False,
},
"iar_settings": {
"reference_structure": False,
"smooth_distance_maps": True,
"smooth_sigma": 1,
"z_score_statistic": "mad",
"outlier_method": "iqr",
"outlier_factor": 1.5,
"min_best_atlases": 5,
"project_on_sphere": False,
},
"label_fusion_settings": {
"vote_type": "unweighted",
"vote_params": None,
"optimal_threshold": {
"AORTICVALVE": 0.5,
"ASCENDINGAORTA": 0.44,
"LEFTATRIUM": 0.40,
"LEFTVENTRICLE": 0.45,
"MITRALVALVE": 0.5,
"PULMONARYARTERY": 0.46,
"PULMONICVALVE": 0.5,
"RIGHTATRIUM": 0.38,
"RIGHTVENTRICLE": 0.42,
"SVC": 0.44,
"TRICUSPIDVALVE": 0.5,
"WHOLEHEART": 0.5,
},
},
"vessel_spline_settings": {
"vessel_name_list": [
"LANTDESCARTERY",
"LCIRCUMFLEXARTERY",
"LCORONARYARTERY",
"RCORONARYARTERY",
],
"vessel_radius_mm_dict": {
"LANTDESCARTERY": 2,
"LCIRCUMFLEXARTERY": 2,
"LCORONARYARTERY": 2,
"RCORONARYARTERY": 2,
},
"scan_direction_dict": {
"LANTDESCARTERY": "z",
"LCIRCUMFLEXARTERY": "z",
"LCORONARYARTERY": "x",
"RCORONARYARTERY": "z",
},
"stop_condition_type_dict": {
"LANTDESCARTERY": "count",
"LCIRCUMFLEXARTERY": "count",
"LCORONARYARTERY": "count",
"RCORONARYARTERY": "count",
},
"stop_condition_value_dict": {
"LANTDESCARTERY": 2,
"LCIRCUMFLEXARTERY": 2,
"LCORONARYARTERY": 2,
"RCORONARYARTERY": 2,
},
},
"geometric_segmentation_settings": {
"run_geometric_algorithms": True,
"geometric_name_suffix": "_GEOMETRIC",
"atlas_structure_names": {
"atlas_left_ventricle": "LEFTVENTRICLE",
"atlas_right_ventricle": "RIGHTVENTRICLE",
"atlas_left_atrium": "LEFTATRIUM",
"atlas_right_atrium": "RIGHTATRIUM",
"atlas_ascending_aorta": "ASCENDINGAORTA",
"atlas_pulmonary_artery": "PULMONARYARTERY",
"atlas_superior_vena_cava": "SVC",
"atlas_whole_heart": "WHOLEHEART",
},
"valve_definitions": {
"mitral_valve_thickness_mm": 10,
"mitral_valve_radius_mm": 15,
"tricuspid_valve_thickness_mm": 10,
"tricuspid_valve_radius_mm": 15,
"pulmonic_valve_thickness_mm": 10,
"aortic_valve_thickness_mm": 10,
},
"conduction_system_definitions": {
"sinoatrial_node_radius_mm": 10,
"atrioventricular_node_radius_mm": 10,
},
},
"postprocessing_settings": {
"run_postprocessing": True,
"binaryfillhole_mm": 3,
"structures_for_binaryfillhole": [
"ASCENDINGAORTA",
"LEFTATRIUM",
"LEFTVENTRICLE",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"SVC",
"AORTICVALVE",
"MITRALVALVE",
"PULMONICVALVE",
"TRICUSPIDVALVE",
"WHOLEHEART",
],
"structures_for_overlap_correction": [
"ASCENDINGAORTA",
"LEFTATRIUM",
"LEFTVENTRICLE",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"PULMONARYARTERY",
"SVC",
],
},
"return_atlas_guide_structure": False,
"return_as_cropped": False,
"return_proba_as_contours": False,
}
def run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):
"""Runs the atlas-based cardiac segmentation
Args:
img (sitk.Image):
settings (dict, optional): Dictionary containing settings for algorithm.
Defaults to default_settings.
Returns:
dict: Dictionary containing output of segmentation
"""
results = {}
results_prob = {}
return_as_cropped = settings["return_as_cropped"]
"""
Initialisation - Read in atlases
- image files
- structure files
Atlas structure:
'ID': 'Original': 'CT Image' : sitk.Image
'Struct A' : sitk.Image
'Struct B' : sitk.Image
'RIR' : 'CT Image' : sitk.Image
'Transform' : transform parameter map
'Struct A' : sitk.Image
'Struct B' : sitk.Image
'DIR' : 'CT Image' : sitk.Image
'Transform' : displacement field transform
'Weight Map' : sitk.Image
'Struct A' : sitk.Image
'Struct B' : sitk.Image
"""
logger.info("")
# Settings
atlas_path = settings["atlas_settings"]["atlas_path"]
atlas_id_list = settings["atlas_settings"]["atlas_id_list"]
atlas_structure_list = settings["atlas_settings"]["atlas_structure_list"]
atlas_image_format = settings["atlas_settings"]["atlas_image_format"]
atlas_label_format = settings["atlas_settings"]["atlas_label_format"]
crop_atlas_to_structures = settings["atlas_settings"]["crop_atlas_to_structures"]
crop_atlas_expansion_mm = settings["atlas_settings"]["crop_atlas_expansion_mm"]
atlas_set = {}
for atlas_id in atlas_id_list:
atlas_set[atlas_id] = {}
atlas_set[atlas_id]["Original"] = {}
image = sitk.ReadImage(f"{atlas_path}/{atlas_image_format.format(atlas_id)}")
structures = {
struct: sitk.ReadImage(f"{atlas_path}/{atlas_label_format.format(atlas_id, struct)}")
for struct in atlas_structure_list
}
if crop_atlas_to_structures:
logger.info(f"Automatically cropping atlas: {atlas_id}")
            original_volume = np.prod(image.GetSize())
crop_box_size, crop_box_index = label_to_roi(
structures.values(), expansion_mm=crop_atlas_expansion_mm
)
image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)
            final_volume = np.prod(image.GetSize())
logger.info(f" > Volume reduced by factor {original_volume/final_volume:.2f}")
for struct in atlas_structure_list:
structures[struct] = crop_to_roi(
structures[struct], size=crop_box_size, index=crop_box_index
)
atlas_set[atlas_id]["Original"]["CT Image"] = image
for struct in atlas_structure_list:
atlas_set[atlas_id]["Original"][struct] = structures[struct]
"""
Step 1 - Automatic cropping
If we have a guide structure:
- use structure to crop target image
Otherwise:
- using a quick registration to register each atlas
- expansion of the bounding box to ensure entire volume of interest is enclosed
- target image is cropped
"""
expansion_mm = settings["auto_crop_target_image_settings"]["expansion_mm"]
if guide_structure:
crop_box_size, crop_box_index = label_to_roi(guide_structure, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)
target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)
else:
quick_reg_settings = {
"reg_method": "similarity",
"shrink_factors": [8],
"smooth_sigmas": [0],
"sampling_rate": 0.75,
"default_value": -1000,
"number_of_iterations": 25,
"final_interp": sitk.sitkLinear,
"metric": "mean_squares",
"optimiser": "gradient_descent_line_search",
}
registered_crop_images = []
logger.info("Running initial Translation tranform to crop image volume")
for atlas_id in atlas_id_list[: min([8, len(atlas_id_list)])]:
logger.info(f" > atlas {atlas_id}")
# Register the atlases
atlas_set[atlas_id]["RIR"] = {}
atlas_image = atlas_set[atlas_id]["Original"]["CT Image"]
reg_image, _ = linear_registration(
img,
atlas_image,
**quick_reg_settings,
)
registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))
del reg_image
combined_image = sum(registered_crop_images) / len(registered_crop_images) > -1000
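        # The mean of the quickly registered atlas images, thresholded above -1000 HU,
        # gives a rough foreground mask from which the crop box is derived.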
crop_box_size, crop_box_index = label_to_roi(combined_image, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
logger.info("Calculated crop box:")
logger.info(f" > {crop_box_index}")
logger.info(f" > {crop_box_size}")
logger.info(f" > Vol reduction = {np.product(img.GetSize())/np.product(crop_box_size):.2f}")
"""
Step 2 - Rigid registration of target images
- Individual atlas images are registered to the target
- The transformation is used to propagate the labels onto the target
"""
linear_registration_settings = settings["linear_registration_settings"]
logger.info(
f"Running {linear_registration_settings['reg_method']} tranform to align atlas images"
)
for atlas_id in atlas_id_list:
# Register the atlases
logger.info(f" > atlas {atlas_id}")
atlas_set[atlas_id]["RIR"] = {}
if guide_structure:
guide_structure_name = settings["atlas_settings"]["guide_structure_name"]
target_reg_image = target_reg_structure
atlas_reg_image = convert_mask_to_reg_structure(
atlas_set[atlas_id]["Original"][guide_structure_name], expansion=2
)
else:
target_reg_image = img_crop
atlas_reg_image = atlas_set[atlas_id]["Original"]["CT Image"]
_, initial_tfm = linear_registration(
target_reg_image,
atlas_reg_image,
**linear_registration_settings,
)
# Save in the atlas dict
atlas_set[atlas_id]["RIR"]["Transform"] = initial_tfm
if guide_structure:
atlas_set[atlas_id]["RIR"]["Reg Mask"] = apply_transform(
input_image=atlas_reg_image,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkLinear,
)
expanded_atlas_guide_structure = extend_mask(
atlas_set[atlas_id]["Original"][guide_structure_name],
direction=("ax", "sup"),
extension_mm=settings["atlas_settings"]["superior_extension"],
interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
)
atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"] = apply_transform(
input_image=expanded_atlas_guide_structure,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["RIR"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id]["Original"]["CT Image"],
reference_image=img_crop,
transform=initial_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
# sitk.WriteImage(rigid_image, f"./RR_{atlas_id}.nii.gz")
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]["Original"][struct]
atlas_set[atlas_id]["RIR"][struct] = apply_transform(
input_image=input_struct,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["Original"] = None
"""
Step 3 - Deformable image registration
- Using Fast Symmetric Diffeomorphic Demons
"""
if guide_structure:
structure_guided_registration_settings = settings["structure_guided_registration_settings"]
logger.info("Running structure-guided deformable registration on atlas labels")
for atlas_id in atlas_id_list:
logger.info(f" > atlas {atlas_id}")
# Register the atlases
atlas_set[atlas_id]["DIR_STRUCT"] = {}
deform_image, struct_guided_tfm, _ = fast_symmetric_forces_demons_registration(
target_reg_structure,
atlas_set[atlas_id]["RIR"]["Reg Mask"],
**structure_guided_registration_settings,
)
# Save in the atlas dict
atlas_set[atlas_id]["DIR_STRUCT"]["Reg Mask"] = deform_image
atlas_set[atlas_id]["DIR_STRUCT"]["Transform"] = struct_guided_tfm
atlas_set[atlas_id]["DIR_STRUCT"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id]["RIR"]["CT Image"],
transform=struct_guided_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
atlas_set[atlas_id]["DIR_STRUCT"][guide_structure_name + "EXPANDED"] = apply_transform(
input_image=atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"],
reference_image=img_crop,
transform=struct_guided_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
# sitk.WriteImage(deform_image, f"./DIR_STRUCT_{atlas_id}.nii.gz")
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]["RIR"][struct]
atlas_set[atlas_id]["DIR_STRUCT"][struct] = apply_transform(
input_image=input_struct,
transform=struct_guided_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["RIR"] = None
# Settings
deformable_registration_settings = settings["deformable_registration_settings"]
logger.info("Running DIR to refine atlas image registration")
for atlas_id in atlas_id_list:
logger.info(f" > atlas {atlas_id}")
# Register the atlases
atlas_set[atlas_id]["DIR"] = {}
if guide_structure:
label = "DIR_STRUCT"
else:
label = "RIR"
atlas_reg_image = atlas_set[atlas_id][label]["CT Image"]
target_reg_image = img_crop
if guide_structure:
expanded_atlas_mask = atlas_set[atlas_id]["DIR_STRUCT"][
guide_structure_name + "EXPANDED"
]
expanded_target_mask = extend_mask(
guide_structure,
direction=("ax", "sup"),
extension_mm=settings["atlas_settings"]["superior_extension"],
interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
)
combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)
atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=-1000)
atlas_reg_image = sitk.Mask(
atlas_reg_image, atlas_reg_image > -400, outsideValue=-1000
)
target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=-1000)
target_reg_image = sitk.Mask(
target_reg_image, atlas_reg_image > -400, outsideValue=-1000
)
deform_image, dir_tfm, _ = fast_symmetric_forces_demons_registration(
target_reg_image,
atlas_reg_image,
**deformable_registration_settings,
)
# Save in the atlas dict
atlas_set[atlas_id]["DIR"]["Transform"] = dir_tfm
atlas_set[atlas_id]["DIR"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id][label]["CT Image"],
transform=dir_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id][label][struct]
atlas_set[atlas_id]["DIR"][struct] = apply_transform(
input_image=input_struct,
transform=dir_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id][label] = None
"""
Step 4 - Iterative atlas removal
- This is an automatic process that will attempt to remove inconsistent atlases from the entire set
"""
# Compute weight maps
    # Here we use simple GWV (global weighted voting) as this minimises the potentially
    # negative influence of mis-registered atlases
iar_settings = settings["iar_settings"]
if iar_settings["reference_structure"]:
for atlas_id in atlas_id_list:
atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
weight_map = compute_weight_map(img_crop, atlas_image, vote_type="global")
atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)
else:
logger.info("IAR: No reference structure, skipping iterative atlas removal.")
"""
    Step 5 - Vessel Splining
"""
vessel_spline_settings = settings["vessel_spline_settings"]
if len(vessel_spline_settings["vessel_name_list"]) > 0:
segmented_vessel_dict = vessel_spline_generation(
img_crop, atlas_set, **vessel_spline_settings
)
else:
logger.info("No vessel splining required, continue.")
"""
    Step 6 - Label Fusion
"""
# Compute weight maps
vote_type = settings["label_fusion_settings"]["vote_type"]
vote_params = settings["label_fusion_settings"]["vote_params"]
# Compute weight maps
for atlas_id in list(atlas_set.keys()):
atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
weight_map = compute_weight_map(
img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params
)
atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
combined_label_dict = combine_labels(atlas_set, atlas_structure_list)
"""
    Step 7 - Paste the cropped structure into the original image space
"""
logger.info("Generating binary segmentations.")
template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)
template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)
vote_structures = settings["label_fusion_settings"]["optimal_threshold"].keys()
vote_structures = [i for i in vote_structures if i in atlas_structure_list]
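    # Only structures with both an optimal threshold and an atlas label are fused.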
for structure_name in vote_structures:
probability_map = combined_label_dict[structure_name]
optimal_threshold = settings["label_fusion_settings"]["optimal_threshold"][structure_name]
binary_struct = process_probability_image(probability_map, optimal_threshold)
if return_as_cropped:
results[structure_name] = binary_struct
if settings["return_proba_as_contours"]:
atlas_contours = [
atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
]
results_prob[structure_name] = binary_encode_structure_list(atlas_contours)
else:
results_prob[structure_name] = probability_map
# We also generate another version of the guide_structure using the atlas contours
# We *can* return this, but probably don't want to
# Here this check is performed
if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
results[guide_structure_name] = guide_structure
results_prob[guide_structure_name] = guide_structure
else:
if settings["return_proba_as_contours"]:
atlas_contours = [
atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
]
probability_img = binary_encode_structure_list(atlas_contours)
template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)
else:
probability_img = probability_map
# Un-crop binary structure
paste_img_binary = sitk.Paste(
template_img_binary,
binary_struct,
binary_struct.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[structure_name] = paste_img_binary
# Un-crop probability map
paste_prob_img = sitk.Paste(
template_img_prob,
probability_img,
probability_img.GetSize(),
(0, 0, 0),
crop_box_index,
)
results_prob[structure_name] = paste_prob_img
# Un-crop the guide structure
if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
new_guide_structure = sitk.Paste(
template_img_binary,
guide_structure,
guide_structure.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[guide_structure_name] = new_guide_structure
results_prob[guide_structure_name] = new_guide_structure
for structure_name in vessel_spline_settings["vessel_name_list"]:
binary_struct = segmented_vessel_dict[structure_name]
if return_as_cropped:
results[structure_name] = binary_struct
vessel_list = [
atlas_set[atlas_id]["DIR"][structure_name] for atlas_id in list(atlas_set.keys())
]
else:
# Un-crop binary vessel
paste_img_binary = sitk.Paste(
template_img_binary,
binary_struct,
binary_struct.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[structure_name] = paste_img_binary
vessel_list = []
for atlas_id in list(atlas_set.keys()):
paste_img_binary = sitk.Paste(
template_img_binary,
atlas_set[atlas_id]["DIR"][structure_name],
atlas_set[atlas_id]["DIR"][structure_name].GetSize(),
(0, 0, 0),
crop_box_index,
)
vessel_list.append(paste_img_binary)
# Encode list of vessels
encoded_vessels = binary_encode_structure_list(vessel_list)
results_prob[structure_name] = encoded_vessels
"""
    Step 8 - Geometric definitions of cardiac valves and conduction system nodes
"""
geometric_segmentation_settings = settings["geometric_segmentation_settings"]
if geometric_segmentation_settings["run_geometric_algorithms"]:
logger.info("Computing geometric definitions for valves and conduction system.")
geom_atlas_names = geometric_segmentation_settings["atlas_structure_names"]
geom_valve_defs = geometric_segmentation_settings["valve_definitions"]
geom_conduction_defs = geometric_segmentation_settings["conduction_system_definitions"]
# 1 - MITRAL VALVE
mv_name = "MITRALVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[mv_name] = generate_valve_using_cylinder(
label_atrium=results[geom_atlas_names["atlas_left_atrium"]],
label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
radius_mm=geom_valve_defs["mitral_valve_radius_mm"],
height_mm=geom_valve_defs["mitral_valve_thickness_mm"],
)
# 2 - TRICUSPID VALVE
tv_name = "TRICUSPIDVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[tv_name] = generate_valve_using_cylinder(
label_atrium=results[geom_atlas_names["atlas_right_atrium"]],
label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
radius_mm=geom_valve_defs["tricuspid_valve_radius_mm"],
height_mm=geom_valve_defs["tricuspid_valve_thickness_mm"],
)
# 3 - AORTIC VALVE
av_name = "AORTICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[av_name] = generate_valve_from_great_vessel(
label_great_vessel=results[geom_atlas_names["atlas_ascending_aorta"]],
label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
valve_thickness_mm=geom_valve_defs["aortic_valve_thickness_mm"],
)
# 4 - PULMONIC VALVE
pv_name = "PULMONICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[pv_name] = generate_valve_from_great_vessel(
label_great_vessel=results[geom_atlas_names["atlas_pulmonary_artery"]],
label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
valve_thickness_mm=geom_valve_defs["pulmonic_valve_thickness_mm"],
)
# 5 - SINOATRIAL NODE
san_name = "SAN" + geometric_segmentation_settings["geometric_name_suffix"]
results[san_name] = geometric_sinoatrialnode(
label_svc=results[geom_atlas_names["atlas_superior_vena_cava"]],
label_ra=results[geom_atlas_names["atlas_right_atrium"]],
label_wholeheart=results[geom_atlas_names["atlas_whole_heart"]],
radius_mm=geom_conduction_defs["sinoatrial_node_radius_mm"],
)
# 6 - ATRIOVENTRICULAR NODE
avn_name = "AVN" + geometric_segmentation_settings["geometric_name_suffix"]
results[avn_name] = geometric_atrioventricularnode(
label_la=results[geom_atlas_names["atlas_left_atrium"]],
label_lv=results[geom_atlas_names["atlas_left_ventricle"]],
label_ra=results[geom_atlas_names["atlas_right_atrium"]],
label_rv=results[geom_atlas_names["atlas_right_ventricle"]],
radius_mm=geom_conduction_defs["atrioventricular_node_radius_mm"],
)
"""
    Step 9 - Post-processing
"""
postprocessing_settings = settings["postprocessing_settings"]
if postprocessing_settings["run_postprocessing"]:
logger.info("Running post-processing.")
# Remove any smaller components and perform morphological closing (hole filling)
binaryfillhole_img = [
int(postprocessing_settings["binaryfillhole_mm"] / sp) for sp in img.GetSpacing()
]
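        # The closing radius is converted from mm to a per-axis radius in voxels
        # using the image spacing, so the hole filling is isotropic in mm.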
for structure_name in postprocessing_settings["structures_for_binaryfillhole"]:
if structure_name not in results.keys():
continue
contour_s = results[structure_name]
contour_s = sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1
contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)
results[structure_name] = contour_s
# Remove any overlaps
input_overlap = {
s: results[s] for s in postprocessing_settings["structures_for_overlap_correction"]
}
output_overlap = correct_volume_overlap(input_overlap)
for s in postprocessing_settings["structures_for_overlap_correction"]:
results[s] = output_overlap[s]
if return_as_cropped:
results["CROP_IMAGE"] = img_crop
logger.info("Done!")
return results, results_prob
| 35.918436
| 103
| 0.613339
|
import os
import SimpleITK as sitk
import numpy as np
from loguru import logger
from platipy.imaging.registration.utils import apply_transform, convert_mask_to_reg_structure
from platipy.imaging.registration.linear import (
linear_registration,
)
from platipy.imaging.registration.deformable import (
fast_symmetric_forces_demons_registration,
)
from platipy.imaging.label.fusion import (
process_probability_image,
compute_weight_map,
combine_labels,
)
from platipy.imaging.label.iar import run_iar
from platipy.imaging.utils.vessel import vessel_spline_generation
from platipy.imaging.utils.valve import (
generate_valve_from_great_vessel,
generate_valve_using_cylinder,
)
from platipy.imaging.utils.conduction import (
geometric_sinoatrialnode,
geometric_atrioventricularnode,
)
from platipy.imaging.utils.crop import label_to_roi, crop_to_roi
from platipy.imaging.generation.mask import extend_mask
from platipy.imaging.label.utils import binary_encode_structure_list, correct_volume_overlap
ATLAS_PATH = "/atlas"
if "ATLAS_PATH" in os.environ:
ATLAS_PATH = os.environ["ATLAS_PATH"]
CARDIAC_SETTINGS_DEFAULTS = {
"atlas_settings": {
"atlas_id_list": [
"03",
"05",
"08",
"10",
"11",
"12",
"13",
"16",
"24",
"35",
],
"atlas_structure_list": [
"AORTICVALVE",
"ASCENDINGAORTA",
"LANTDESCARTERY",
"LCIRCUMFLEXARTERY",
"LCORONARYARTERY",
"LEFTATRIUM",
"LEFTVENTRICLE",
"MITRALVALVE",
"PULMONARYARTERY",
"PULMONICVALVE",
"RCORONARYARTERY",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"SVC",
"TRICUSPIDVALVE",
"WHOLEHEART",
],
"atlas_path": ATLAS_PATH,
"atlas_image_format": "Case_{0}/Images/Case_{0}_CROP.nii.gz",
"atlas_label_format": "Case_{0}/Structures/Case_{0}_{1}_CROP.nii.gz",
"crop_atlas_to_structures": False,
"crop_atlas_expansion_mm": (20, 20, 40),
"guide_structure_name": "WHOLEHEART",
"superior_extension": 30,
},
"auto_crop_target_image_settings": {
"expansion_mm": [20, 20, 40],
},
"linear_registration_settings": {
"reg_method": "affine",
"shrink_factors": [16, 8, 4],
"smooth_sigmas": [0, 0, 0],
"sampling_rate": 0.75,
"default_value": -1000,
"number_of_iterations": 50,
"metric": "mean_squares",
"optimiser": "gradient_descent_line_search",
"verbose": False,
},
"structure_guided_registration_settings": {
"isotropic_resample": True,
"resolution_staging": [
16,
8,
2,
],
"iteration_staging": [50, 50, 50],
"smoothing_sigmas": [0, 0, 0],
"ncores": 8,
"default_value": 0,
"verbose": False,
},
"deformable_registration_settings": {
"isotropic_resample": True,
"resolution_staging": [
6,
3,
1.5,
],
"iteration_staging": [200, 150, 100],
"smoothing_sigmas": [0, 0, 0],
"ncores": 8,
"default_value": 0,
"verbose": False,
},
"iar_settings": {
"reference_structure": False,
"smooth_distance_maps": True,
"smooth_sigma": 1,
"z_score_statistic": "mad",
"outlier_method": "iqr",
"outlier_factor": 1.5,
"min_best_atlases": 5,
"project_on_sphere": False,
},
"label_fusion_settings": {
"vote_type": "unweighted",
"vote_params": None,
"optimal_threshold": {
"AORTICVALVE": 0.5,
"ASCENDINGAORTA": 0.44,
"LEFTATRIUM": 0.40,
"LEFTVENTRICLE": 0.45,
"MITRALVALVE": 0.5,
"PULMONARYARTERY": 0.46,
"PULMONICVALVE": 0.5,
"RIGHTATRIUM": 0.38,
"RIGHTVENTRICLE": 0.42,
"SVC": 0.44,
"TRICUSPIDVALVE": 0.5,
"WHOLEHEART": 0.5,
},
},
"vessel_spline_settings": {
"vessel_name_list": [
"LANTDESCARTERY",
"LCIRCUMFLEXARTERY",
"LCORONARYARTERY",
"RCORONARYARTERY",
],
"vessel_radius_mm_dict": {
"LANTDESCARTERY": 2,
"LCIRCUMFLEXARTERY": 2,
"LCORONARYARTERY": 2,
"RCORONARYARTERY": 2,
},
"scan_direction_dict": {
"LANTDESCARTERY": "z",
"LCIRCUMFLEXARTERY": "z",
"LCORONARYARTERY": "x",
"RCORONARYARTERY": "z",
},
"stop_condition_type_dict": {
"LANTDESCARTERY": "count",
"LCIRCUMFLEXARTERY": "count",
"LCORONARYARTERY": "count",
"RCORONARYARTERY": "count",
},
"stop_condition_value_dict": {
"LANTDESCARTERY": 2,
"LCIRCUMFLEXARTERY": 2,
"LCORONARYARTERY": 2,
"RCORONARYARTERY": 2,
},
},
"geometric_segmentation_settings": {
"run_geometric_algorithms": True,
"geometric_name_suffix": "_GEOMETRIC",
"atlas_structure_names": {
"atlas_left_ventricle": "LEFTVENTRICLE",
"atlas_right_ventricle": "RIGHTVENTRICLE",
"atlas_left_atrium": "LEFTATRIUM",
"atlas_right_atrium": "RIGHTATRIUM",
"atlas_ascending_aorta": "ASCENDINGAORTA",
"atlas_pulmonary_artery": "PULMONARYARTERY",
"atlas_superior_vena_cava": "SVC",
"atlas_whole_heart": "WHOLEHEART",
},
"valve_definitions": {
"mitral_valve_thickness_mm": 10,
"mitral_valve_radius_mm": 15,
"tricuspid_valve_thickness_mm": 10,
"tricuspid_valve_radius_mm": 15,
"pulmonic_valve_thickness_mm": 10,
"aortic_valve_thickness_mm": 10,
},
"conduction_system_definitions": {
"sinoatrial_node_radius_mm": 10,
"atrioventricular_node_radius_mm": 10,
},
},
"postprocessing_settings": {
"run_postprocessing": True,
"binaryfillhole_mm": 3,
"structures_for_binaryfillhole": [
"ASCENDINGAORTA",
"LEFTATRIUM",
"LEFTVENTRICLE",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"SVC",
"AORTICVALVE",
"MITRALVALVE",
"PULMONICVALVE",
"TRICUSPIDVALVE",
"WHOLEHEART",
],
"structures_for_overlap_correction": [
"ASCENDINGAORTA",
"LEFTATRIUM",
"LEFTVENTRICLE",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"PULMONARYARTERY",
"SVC",
],
},
"return_atlas_guide_structure": False,
"return_as_cropped": False,
"return_proba_as_contours": False,
}
def run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):
results = {}
results_prob = {}
return_as_cropped = settings["return_as_cropped"]
logger.info("")
atlas_path = settings["atlas_settings"]["atlas_path"]
atlas_id_list = settings["atlas_settings"]["atlas_id_list"]
atlas_structure_list = settings["atlas_settings"]["atlas_structure_list"]
atlas_image_format = settings["atlas_settings"]["atlas_image_format"]
atlas_label_format = settings["atlas_settings"]["atlas_label_format"]
crop_atlas_to_structures = settings["atlas_settings"]["crop_atlas_to_structures"]
crop_atlas_expansion_mm = settings["atlas_settings"]["crop_atlas_expansion_mm"]
atlas_set = {}
for atlas_id in atlas_id_list:
atlas_set[atlas_id] = {}
atlas_set[atlas_id]["Original"] = {}
image = sitk.ReadImage(f"{atlas_path}/{atlas_image_format.format(atlas_id)}")
structures = {
struct: sitk.ReadImage(f"{atlas_path}/{atlas_label_format.format(atlas_id, struct)}")
for struct in atlas_structure_list
}
if crop_atlas_to_structures:
logger.info(f"Automatically cropping atlas: {atlas_id}")
            original_volume = np.prod(image.GetSize())
crop_box_size, crop_box_index = label_to_roi(
structures.values(), expansion_mm=crop_atlas_expansion_mm
)
image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)
            final_volume = np.prod(image.GetSize())
logger.info(f" > Volume reduced by factor {original_volume/final_volume:.2f}")
for struct in atlas_structure_list:
structures[struct] = crop_to_roi(
structures[struct], size=crop_box_size, index=crop_box_index
)
atlas_set[atlas_id]["Original"]["CT Image"] = image
for struct in atlas_structure_list:
atlas_set[atlas_id]["Original"][struct] = structures[struct]
expansion_mm = settings["auto_crop_target_image_settings"]["expansion_mm"]
if guide_structure:
crop_box_size, crop_box_index = label_to_roi(guide_structure, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)
target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)
else:
quick_reg_settings = {
"reg_method": "similarity",
"shrink_factors": [8],
"smooth_sigmas": [0],
"sampling_rate": 0.75,
"default_value": -1000,
"number_of_iterations": 25,
"final_interp": sitk.sitkLinear,
"metric": "mean_squares",
"optimiser": "gradient_descent_line_search",
}
registered_crop_images = []
logger.info("Running initial Translation tranform to crop image volume")
for atlas_id in atlas_id_list[: min([8, len(atlas_id_list)])]:
logger.info(f" > atlas {atlas_id}")
atlas_set[atlas_id]["RIR"] = {}
atlas_image = atlas_set[atlas_id]["Original"]["CT Image"]
reg_image, _ = linear_registration(
img,
atlas_image,
**quick_reg_settings,
)
registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))
del reg_image
combined_image = sum(registered_crop_images) / len(registered_crop_images) > -1000
crop_box_size, crop_box_index = label_to_roi(combined_image, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
logger.info("Calculated crop box:")
logger.info(f" > {crop_box_index}")
logger.info(f" > {crop_box_size}")
logger.info(f" > Vol reduction = {np.product(img.GetSize())/np.product(crop_box_size):.2f}")
linear_registration_settings = settings["linear_registration_settings"]
logger.info(
f"Running {linear_registration_settings['reg_method']} tranform to align atlas images"
)
for atlas_id in atlas_id_list:
logger.info(f" > atlas {atlas_id}")
atlas_set[atlas_id]["RIR"] = {}
if guide_structure:
guide_structure_name = settings["atlas_settings"]["guide_structure_name"]
target_reg_image = target_reg_structure
atlas_reg_image = convert_mask_to_reg_structure(
atlas_set[atlas_id]["Original"][guide_structure_name], expansion=2
)
else:
target_reg_image = img_crop
atlas_reg_image = atlas_set[atlas_id]["Original"]["CT Image"]
_, initial_tfm = linear_registration(
target_reg_image,
atlas_reg_image,
**linear_registration_settings,
)
atlas_set[atlas_id]["RIR"]["Transform"] = initial_tfm
if guide_structure:
atlas_set[atlas_id]["RIR"]["Reg Mask"] = apply_transform(
input_image=atlas_reg_image,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkLinear,
)
expanded_atlas_guide_structure = extend_mask(
atlas_set[atlas_id]["Original"][guide_structure_name],
direction=("ax", "sup"),
extension_mm=settings["atlas_settings"]["superior_extension"],
interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
)
atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"] = apply_transform(
input_image=expanded_atlas_guide_structure,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["RIR"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id]["Original"]["CT Image"],
reference_image=img_crop,
transform=initial_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]["Original"][struct]
atlas_set[atlas_id]["RIR"][struct] = apply_transform(
input_image=input_struct,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["Original"] = None
if guide_structure:
structure_guided_registration_settings = settings["structure_guided_registration_settings"]
logger.info("Running structure-guided deformable registration on atlas labels")
for atlas_id in atlas_id_list:
logger.info(f" > atlas {atlas_id}")
atlas_set[atlas_id]["DIR_STRUCT"] = {}
deform_image, struct_guided_tfm, _ = fast_symmetric_forces_demons_registration(
target_reg_structure,
atlas_set[atlas_id]["RIR"]["Reg Mask"],
**structure_guided_registration_settings,
)
atlas_set[atlas_id]["DIR_STRUCT"]["Reg Mask"] = deform_image
atlas_set[atlas_id]["DIR_STRUCT"]["Transform"] = struct_guided_tfm
atlas_set[atlas_id]["DIR_STRUCT"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id]["RIR"]["CT Image"],
transform=struct_guided_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
atlas_set[atlas_id]["DIR_STRUCT"][guide_structure_name + "EXPANDED"] = apply_transform(
input_image=atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"],
reference_image=img_crop,
transform=struct_guided_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]["RIR"][struct]
atlas_set[atlas_id]["DIR_STRUCT"][struct] = apply_transform(
input_image=input_struct,
transform=struct_guided_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["RIR"] = None
deformable_registration_settings = settings["deformable_registration_settings"]
logger.info("Running DIR to refine atlas image registration")
for atlas_id in atlas_id_list:
logger.info(f" > atlas {atlas_id}")
atlas_set[atlas_id]["DIR"] = {}
if guide_structure:
label = "DIR_STRUCT"
else:
label = "RIR"
atlas_reg_image = atlas_set[atlas_id][label]["CT Image"]
target_reg_image = img_crop
if guide_structure:
expanded_atlas_mask = atlas_set[atlas_id]["DIR_STRUCT"][
guide_structure_name + "EXPANDED"
]
expanded_target_mask = extend_mask(
guide_structure,
direction=("ax", "sup"),
extension_mm=settings["atlas_settings"]["superior_extension"],
interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
)
combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)
atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=-1000)
atlas_reg_image = sitk.Mask(
atlas_reg_image, atlas_reg_image > -400, outsideValue=-1000
)
target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=-1000)
target_reg_image = sitk.Mask(
target_reg_image, atlas_reg_image > -400, outsideValue=-1000
)
deform_image, dir_tfm, _ = fast_symmetric_forces_demons_registration(
target_reg_image,
atlas_reg_image,
**deformable_registration_settings,
)
atlas_set[atlas_id]["DIR"]["Transform"] = dir_tfm
atlas_set[atlas_id]["DIR"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id][label]["CT Image"],
transform=dir_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id][label][struct]
atlas_set[atlas_id]["DIR"][struct] = apply_transform(
input_image=input_struct,
transform=dir_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id][label] = None
iar_settings = settings["iar_settings"]
if iar_settings["reference_structure"]:
for atlas_id in atlas_id_list:
atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
weight_map = compute_weight_map(img_crop, atlas_image, vote_type="global")
atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)
else:
logger.info("IAR: No reference structure, skipping iterative atlas removal.")
vessel_spline_settings = settings["vessel_spline_settings"]
if len(vessel_spline_settings["vessel_name_list"]) > 0:
segmented_vessel_dict = vessel_spline_generation(
img_crop, atlas_set, **vessel_spline_settings
)
else:
logger.info("No vessel splining required, continue.")
vote_type = settings["label_fusion_settings"]["vote_type"]
vote_params = settings["label_fusion_settings"]["vote_params"]
for atlas_id in list(atlas_set.keys()):
atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
weight_map = compute_weight_map(
img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params
)
atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
combined_label_dict = combine_labels(atlas_set, atlas_structure_list)
logger.info("Generating binary segmentations.")
template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)
template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)
vote_structures = settings["label_fusion_settings"]["optimal_threshold"].keys()
vote_structures = [i for i in vote_structures if i in atlas_structure_list]
for structure_name in vote_structures:
probability_map = combined_label_dict[structure_name]
optimal_threshold = settings["label_fusion_settings"]["optimal_threshold"][structure_name]
binary_struct = process_probability_image(probability_map, optimal_threshold)
if return_as_cropped:
results[structure_name] = binary_struct
if settings["return_proba_as_contours"]:
atlas_contours = [
atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
]
results_prob[structure_name] = binary_encode_structure_list(atlas_contours)
else:
results_prob[structure_name] = probability_map
# Here this check is performed
if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
results[guide_structure_name] = guide_structure
results_prob[guide_structure_name] = guide_structure
else:
if settings["return_proba_as_contours"]:
atlas_contours = [
atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
]
probability_img = binary_encode_structure_list(atlas_contours)
template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)
else:
probability_img = probability_map
# Un-crop binary structure
paste_img_binary = sitk.Paste(
template_img_binary,
binary_struct,
binary_struct.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[structure_name] = paste_img_binary
# Un-crop probability map
paste_prob_img = sitk.Paste(
template_img_prob,
probability_img,
probability_img.GetSize(),
(0, 0, 0),
crop_box_index,
)
results_prob[structure_name] = paste_prob_img
# Un-crop the guide structure
if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
new_guide_structure = sitk.Paste(
template_img_binary,
guide_structure,
guide_structure.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[guide_structure_name] = new_guide_structure
results_prob[guide_structure_name] = new_guide_structure
for structure_name in vessel_spline_settings["vessel_name_list"]:
binary_struct = segmented_vessel_dict[structure_name]
if return_as_cropped:
results[structure_name] = binary_struct
vessel_list = [
atlas_set[atlas_id]["DIR"][structure_name] for atlas_id in list(atlas_set.keys())
]
else:
# Un-crop binary vessel
paste_img_binary = sitk.Paste(
template_img_binary,
binary_struct,
binary_struct.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[structure_name] = paste_img_binary
vessel_list = []
for atlas_id in list(atlas_set.keys()):
paste_img_binary = sitk.Paste(
template_img_binary,
atlas_set[atlas_id]["DIR"][structure_name],
atlas_set[atlas_id]["DIR"][structure_name].GetSize(),
(0, 0, 0),
crop_box_index,
)
vessel_list.append(paste_img_binary)
# Encode list of vessels
encoded_vessels = binary_encode_structure_list(vessel_list)
results_prob[structure_name] = encoded_vessels
geometric_segmentation_settings = settings["geometric_segmentation_settings"]
if geometric_segmentation_settings["run_geometric_algorithms"]:
logger.info("Computing geometric definitions for valves and conduction system.")
geom_atlas_names = geometric_segmentation_settings["atlas_structure_names"]
geom_valve_defs = geometric_segmentation_settings["valve_definitions"]
geom_conduction_defs = geometric_segmentation_settings["conduction_system_definitions"]
# 1 - MITRAL VALVE
mv_name = "MITRALVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[mv_name] = generate_valve_using_cylinder(
label_atrium=results[geom_atlas_names["atlas_left_atrium"]],
label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
radius_mm=geom_valve_defs["mitral_valve_radius_mm"],
height_mm=geom_valve_defs["mitral_valve_thickness_mm"],
)
# 2 - TRICUSPID VALVE
tv_name = "TRICUSPIDVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[tv_name] = generate_valve_using_cylinder(
label_atrium=results[geom_atlas_names["atlas_right_atrium"]],
label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
radius_mm=geom_valve_defs["tricuspid_valve_radius_mm"],
height_mm=geom_valve_defs["tricuspid_valve_thickness_mm"],
)
# 3 - AORTIC VALVE
av_name = "AORTICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[av_name] = generate_valve_from_great_vessel(
label_great_vessel=results[geom_atlas_names["atlas_ascending_aorta"]],
label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
valve_thickness_mm=geom_valve_defs["aortic_valve_thickness_mm"],
)
# 4 - PULMONIC VALVE
pv_name = "PULMONICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[pv_name] = generate_valve_from_great_vessel(
label_great_vessel=results[geom_atlas_names["atlas_pulmonary_artery"]],
label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
valve_thickness_mm=geom_valve_defs["pulmonic_valve_thickness_mm"],
)
# 5 - SINOATRIAL NODE
san_name = "SAN" + geometric_segmentation_settings["geometric_name_suffix"]
results[san_name] = geometric_sinoatrialnode(
label_svc=results[geom_atlas_names["atlas_superior_vena_cava"]],
label_ra=results[geom_atlas_names["atlas_right_atrium"]],
label_wholeheart=results[geom_atlas_names["atlas_whole_heart"]],
radius_mm=geom_conduction_defs["sinoatrial_node_radius_mm"],
)
# 6 - ATRIOVENTRICULAR NODE
avn_name = "AVN" + geometric_segmentation_settings["geometric_name_suffix"]
results[avn_name] = geometric_atrioventricularnode(
label_la=results[geom_atlas_names["atlas_left_atrium"]],
label_lv=results[geom_atlas_names["atlas_left_ventricle"]],
label_ra=results[geom_atlas_names["atlas_right_atrium"]],
label_rv=results[geom_atlas_names["atlas_right_ventricle"]],
radius_mm=geom_conduction_defs["atrioventricular_node_radius_mm"],
)
postprocessing_settings = settings["postprocessing_settings"]
if postprocessing_settings["run_postprocessing"]:
logger.info("Running post-processing.")
# Remove any smaller components and perform morphological closing (hole filling)
binaryfillhole_img = [
int(postprocessing_settings["binaryfillhole_mm"] / sp) for sp in img.GetSpacing()
]
for structure_name in postprocessing_settings["structures_for_binaryfillhole"]:
if structure_name not in results.keys():
continue
contour_s = results[structure_name]
contour_s = sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1
contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)
results[structure_name] = contour_s
# Remove any overlaps
input_overlap = {
s: results[s] for s in postprocessing_settings["structures_for_overlap_correction"]
}
output_overlap = correct_volume_overlap(input_overlap)
for s in postprocessing_settings["structures_for_overlap_correction"]:
results[s] = output_overlap[s]
if return_as_cropped:
results["CROP_IMAGE"] = img_crop
logger.info("Done!")
return results, results_prob
| true
| true
|
790c2a82d1a2ff91fe456d51a9406506669ddb6e
| 3,540
|
py
|
Python
|
econotw.py
|
ppnasser/econotw
|
ccc3d7fefebdc70b2abfc3e75ba387c22e937f86
|
[
"MIT"
] | null | null | null |
econotw.py
|
ppnasser/econotw
|
ccc3d7fefebdc70b2abfc3e75ba387c22e937f86
|
[
"MIT"
] | null | null | null |
econotw.py
|
ppnasser/econotw
|
ccc3d7fefebdc70b2abfc3e75ba387c22e937f86
|
[
"MIT"
] | null | null | null |
import tweepy
from time import sleep
from datetime import datetime
from keys import *
from tqdm import tqdm
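# keys.py is assumed to define consumer_key, consumer_secret, access_token and
# access_token_secret, which are used below to authenticate with the Twitter API.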
def play(test=True, i_pages=3, i_hashtag=20, like_pages=False, like_hashtag=False):
    while True:
        try:
            econotwbot(test, i_pages, i_hashtag, like_pages, like_hashtag)
except Exception as e:
print(e)
sleep(60*30)
pass
class econotwbot:
def __init__(self, test=True, i_pages=3, i_hashtag=20, like_pages=False, like_hashtag=False):
self.test = test
self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
self.auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(self.auth)
self._file_following = 'to_follow.txt'
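        # to_follow.txt is assumed to contain one Twitter screen name per line;
        # each line is passed to api.user_timeline as screen_name (see below).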
self.i_pages = i_pages
self.i_hashtag = i_hashtag
self.like_pages = like_pages
self.like_hashtag = like_hashtag
self.pbar = 0
        self.iterations = self.i_pages * len(self.following_pages()) + self.i_hashtag
        if self.test:
            self.hello_world()
self.retweepy()
        while not self.test:
print()
print("Starting!",datetime.now())
print()
            with tqdm(total=self.iterations) as self.pbar:
self.retweepy_following()
tqdm.write("Just give me 5 more minutes to sleep please!")
sleep(5*60)
self.retweepy()
print()
print("Iteraction done!",datetime.now())
sleep(30*60)
def following_pages(self):
with open(self._file_following, 'r') as f:
return f.read().splitlines()
def retweepy(self):
tqdm.write('Delivering tweets with #econotw OR #EconTwitter')
dici={'q':'#econotw OR #EconTwitter'}
args={'method':self.api.search,
'dici':dici,
'like':self.like_hashtag,
'i':self.i_hashtag}
self.like_and_rt(**args)
def retweepy_following(self):
tqdm.write('Delivering interesting tweets')
for page in self.following_pages():
            dici = {'screen_name': page}
            args = {'method': self.api.user_timeline,
                    'dici': dici,
                    'like': self.like_pages,
                    'i': self.i_pages}
self.like_and_rt(**args)
    def like_and_rt(self, method, dici, like, i):
        count = 0
        for tweet in tweepy.Cursor(method=method, **dici).items(i):
            self.pbar.update(1)
            count += 1
            try:
                if like:
                    self.api.create_favorite(tweet.id)
                    sleep(1)
                tweet.retweet()
                string = 'Retweeted: ' + str(tweet.id) + ' @' + tweet.user.screen_name
                tqdm.write(string)
                sleep(10)
            # Report retweet errors, ignoring "already favorited" (code 139)
            # and "already retweeted" (code 327).
            except tweepy.TweepError as error:
                if eval(error.reason)[0]['code'] not in (139, 327):
                    tqdm.write('\nError. ' + str(tweet.id) + ' Retweet not successful. Reason: ')
                    tqdm.write(str(error.reason) + ' ' + str(datetime.now()))
self.pbar.update(i-count)
except StopIteration:
break
def hello_world(self):
self.api.update_status("""Hello World! #econotw""")
if __name__ == "__main__":
play(test=False)
# scripts/gen_vimdoc.py | MDeiml/neovim | Vim
#!/usr/bin/env python3
"""Generates Nvim :help docs from C/Lua docstrings, using Doxygen.
Also generates *.mpack files. To inspect the *.mpack structure:
:new | put=v:lua.vim.inspect(msgpackparse(readfile('runtime/doc/api.mpack')))
Flow:
main
extract_from_xml
fmt_node_as_vimhelp \
para_as_map } recursive
update_params_map /
render_node
This would be easier using lxml and XSLT, but:
1. This should avoid needing Python dependencies, especially ones that are
C modules that have library dependencies (lxml requires libxml and
libxslt).
2. I wouldn't know how to deal with nested indentation in <para> tags using
XSLT.
Each function :help block is formatted as follows:
- Max width of 78 columns (`text_width`).
- Indent with spaces (not tabs).
- Indent of 16 columns for body text.
- Function signature and helptag (right-aligned) on the same line.
- Signature and helptag must have a minimum of 8 spaces between them.
- If the signature is too long, it is placed on the line after the helptag.
Signature wraps at `text_width - 8` characters with subsequent
lines indented to the open parenthesis.
- Subsection bodies are indented an additional 4 spaces.
- Body consists of function description, parameters, return description, and
C declaration (`INCLUDE_C_DECL`).
- Parameters are omitted for the `void` and `Error *` types, or if the
parameter is marked as [out].
- Each function documentation is separated by a single line.
"""
import argparse
import os
import re
import sys
import shutil
import textwrap
import subprocess
import collections
import msgpack
import logging
from xml.dom import minidom
MIN_PYTHON_VERSION = (3, 6)
MIN_DOXYGEN_VERSION = (1, 9, 0)
if sys.version_info < MIN_PYTHON_VERSION:
print("requires Python {}.{}+".format(*MIN_PYTHON_VERSION))
sys.exit(1)
doxygen_version = tuple([int(i) for i in subprocess.check_output(["doxygen", "-v"],
universal_newlines=True).split()[0].split('.')])
if doxygen_version < MIN_DOXYGEN_VERSION:
print("\nRequires doxygen {}.{}.{}+".format(*MIN_DOXYGEN_VERSION))
print("Your doxygen version is {}.{}.{}\n".format(*doxygen_version))
sys.exit(1)
# DEBUG = ('DEBUG' in os.environ)
INCLUDE_C_DECL = ('INCLUDE_C_DECL' in os.environ)
INCLUDE_DEPRECATED = ('INCLUDE_DEPRECATED' in os.environ)
log = logging.getLogger(__name__)
LOG_LEVELS = {
logging.getLevelName(level): level for level in [
logging.DEBUG, logging.INFO, logging.ERROR
]
}
text_width = 78
script_path = os.path.abspath(__file__)
base_dir = os.path.dirname(os.path.dirname(script_path))
out_dir = os.path.join(base_dir, 'tmp-{target}-doc')
filter_cmd = '%s %s' % (sys.executable, script_path)
msgs = [] # Messages to show on exit.
lua2dox_filter = os.path.join(base_dir, 'scripts', 'lua2dox_filter')
CONFIG = {
'api': {
'mode': 'c',
'filename': 'api.txt',
# Section ordering.
'section_order': [
'vim.c',
'vimscript.c',
'buffer.c',
'extmark.c',
'window.c',
'win_config.c',
'tabpage.c',
'autocmd.c',
'ui.c',
],
# List of files/directories for doxygen to read, relative to `base_dir`
'files': ['src/nvim/api'],
# file patterns used by doxygen
'file_patterns': '*.h *.c',
# Only function with this prefix are considered
'fn_name_prefix': 'nvim_',
# Section name overrides.
'section_name': {
'vim.c': 'Global',
},
# For generated section names.
'section_fmt': lambda name: f'{name} Functions',
# Section helptag.
'helptag_fmt': lambda name: f'*api-{name.lower()}*',
# Per-function helptag.
'fn_helptag_fmt': lambda fstem, name: f'*{name}()*',
# Module name overrides (for Lua).
'module_override': {},
# Append the docs for these modules, do not start a new section.
'append_only': [],
},
'lua': {
'mode': 'lua',
'filename': 'lua.txt',
'section_order': [
'_editor.lua',
'shared.lua',
'uri.lua',
'ui.lua',
'filetype.lua',
'keymap.lua',
'fs.lua',
],
'files': [
'runtime/lua/vim/_editor.lua',
'runtime/lua/vim/shared.lua',
'runtime/lua/vim/uri.lua',
'runtime/lua/vim/ui.lua',
'runtime/lua/vim/filetype.lua',
'runtime/lua/vim/keymap.lua',
'runtime/lua/vim/fs.lua',
],
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {
'lsp.lua': 'core',
},
'section_fmt': lambda name: (
'Lua module: vim'
if name.lower() == '_editor'
else f'Lua module: {name.lower()}'),
'helptag_fmt': lambda name: (
'*lua-vim*'
if name.lower() == '_editor'
else f'*lua-{name.lower()}*'),
'fn_helptag_fmt': lambda fstem, name: (
f'*vim.{name}()*'
if fstem.lower() == '_editor'
else f'*{fstem}.{name}()*'),
'module_override': {
# `shared` functions are exposed on the `vim` module.
'shared': 'vim',
'uri': 'vim',
'ui': 'vim.ui',
'filetype': 'vim.filetype',
'keymap': 'vim.keymap',
'fs': 'vim.fs',
},
'append_only': [
'shared.lua',
],
},
'lsp': {
'mode': 'lua',
'filename': 'lsp.txt',
'section_order': [
'lsp.lua',
'buf.lua',
'diagnostic.lua',
'codelens.lua',
'tagfunc.lua',
'handlers.lua',
'util.lua',
'log.lua',
'rpc.lua',
'sync.lua',
'protocol.lua',
],
'files': [
'runtime/lua/vim/lsp',
'runtime/lua/vim/lsp.lua',
],
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {'lsp.lua': 'lsp'},
'section_fmt': lambda name: (
'Lua module: vim.lsp'
if name.lower() == 'lsp'
else f'Lua module: vim.lsp.{name.lower()}'),
'helptag_fmt': lambda name: (
'*lsp-core*'
if name.lower() == 'lsp'
else f'*lsp-{name.lower()}*'),
'fn_helptag_fmt': lambda fstem, name: (
f'*vim.lsp.{name}()*'
if fstem == 'lsp' and name != 'client'
else (
'*vim.lsp.client*'
# HACK. TODO(justinmk): class/structure support in lua2dox
if 'lsp.client' == f'{fstem}.{name}'
else f'*vim.lsp.{fstem}.{name}()*')),
'module_override': {},
'append_only': [],
},
'diagnostic': {
'mode': 'lua',
'filename': 'diagnostic.txt',
'section_order': [
'diagnostic.lua',
],
'files': ['runtime/lua/vim/diagnostic.lua'],
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {'diagnostic.lua': 'diagnostic'},
'section_fmt': lambda _: 'Lua module: vim.diagnostic',
'helptag_fmt': lambda _: '*diagnostic-api*',
'fn_helptag_fmt': lambda fstem, name: f'*vim.{fstem}.{name}()*',
'module_override': {},
'append_only': [],
},
'treesitter': {
'mode': 'lua',
'filename': 'treesitter.txt',
'section_order': [
'treesitter.lua',
'language.lua',
'query.lua',
'highlighter.lua',
'languagetree.lua',
],
'files': [
'runtime/lua/vim/treesitter.lua',
'runtime/lua/vim/treesitter/',
],
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {},
'section_fmt': lambda name: (
'Lua module: vim.treesitter'
if name.lower() == 'treesitter'
else f'Lua module: vim.treesitter.{name.lower()}'),
'helptag_fmt': lambda name: (
'*lua-treesitter-core*'
if name.lower() == 'treesitter'
else f'*treesitter-{name.lower()}*'),
'fn_helptag_fmt': lambda fstem, name: (
f'*{name}()*'
if name != 'new'
else f'*{fstem}.{name}()*'),
# 'fn_helptag_fmt': lambda fstem, name: (
# f'*vim.treesitter.{name}()*'
# if fstem == 'treesitter'
# else (
# '*vim.lsp.client*'
# # HACK. TODO(justinmk): class/structure support in lua2dox
# if 'lsp.client' == f'{fstem}.{name}'
# else f'*vim.lsp.{fstem}.{name}()*')),
'module_override': {},
'append_only': [],
}
}
param_exclude = (
'channel_id',
)
# Annotations are displayed as line items after API function descriptions.
annotation_map = {
'FUNC_API_FAST': '|api-fast|',
'FUNC_API_CHECK_TEXTLOCK': 'not allowed when |textlock| is active',
'FUNC_API_REMOTE_ONLY': '|RPC| only',
'FUNC_API_LUA_ONLY': '|vim.api| only',
}
# Raises an error with details about `o` if `cond` is truthy, if `cond` is
# found in `o`, or if `cond` is callable and returns True.
def debug_this(o, cond=True):
name = ''
if not isinstance(o, str):
try:
name = o.nodeName
o = o.toprettyxml(indent=' ', newl='\n')
except Exception:
pass
if ((callable(cond) and cond())
or (not callable(cond) and cond)
or (not callable(cond) and cond in o)):
raise RuntimeError('xxx: {}\n{}'.format(name, o))
# Appends a message to a list which will be printed on exit.
def msg(s):
msgs.append(s)
# Print all collected messages.
def msg_report():
for m in msgs:
print(f' {m}')
# Print collected messages, then throw an exception.
def fail(s):
msg_report()
raise RuntimeError(s)
def find_first(parent, name):
"""Finds the first matching node within parent."""
sub = parent.getElementsByTagName(name)
if not sub:
return None
return sub[0]
def iter_children(parent, name):
"""Yields matching child nodes within parent."""
for child in parent.childNodes:
if child.nodeType == child.ELEMENT_NODE and child.nodeName == name:
yield child
def get_child(parent, name):
"""Gets the first matching child node."""
for child in iter_children(parent, name):
return child
return None
def self_or_child(n):
"""Gets the first child node, or self."""
if len(n.childNodes) == 0:
return n
return n.childNodes[0]
def clean_lines(text):
"""Removes superfluous lines.
The beginning and end of the string is trimmed. Empty lines are collapsed.
"""
return re.sub(r'\A\n\s*\n*|\n\s*\n*\Z', '', re.sub(r'(\n\s*\n+)+', '\n\n', text))
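# For example (illustrative only):
#   >>> clean_lines('\n\nfoo\n\n\n\nbar\n\n')
#   'foo\n\nbar'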
def is_blank(text):
return '' == clean_lines(text)
def get_text(n, preformatted=False):
"""Recursively concatenates all text in a node tree."""
text = ''
if n.nodeType == n.TEXT_NODE:
return n.data
if n.nodeName == 'computeroutput':
for node in n.childNodes:
text += get_text(node)
return '`{}`'.format(text)
for node in n.childNodes:
if node.nodeType == node.TEXT_NODE:
text += node.data
elif node.nodeType == node.ELEMENT_NODE:
text += get_text(node, preformatted)
return text
# Gets the length of the last line in `text`, excluding newline ("\n") char.
def len_lastline(text):
lastnl = text.rfind('\n')
if -1 == lastnl:
return len(text)
if '\n' == text[-1]:
return lastnl - (1 + text.rfind('\n', 0, lastnl))
return len(text) - (1 + lastnl)
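# For example (illustrative only):
#   >>> len_lastline('ab\ncdef')
#   4
#   >>> len_lastline('ab\n')
#   2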
def len_lastline_withoutindent(text, indent):
n = len_lastline(text)
return (n - len(indent)) if n > len(indent) else 0
# Returns True if node `n` contains only inline (not block-level) elements.
def is_inline(n):
# if len(n.childNodes) == 0:
# return n.nodeType == n.TEXT_NODE or n.nodeName == 'computeroutput'
for c in n.childNodes:
if c.nodeType != c.TEXT_NODE and c.nodeName != 'computeroutput':
return False
if not is_inline(c):
return False
return True
def doc_wrap(text, prefix='', width=70, func=False, indent=None):
"""Wraps text to `width`.
First line is prefixed with `prefix`, subsequent lines are aligned.
If `func` is True, only wrap at commas.
"""
if not width:
# return prefix + text
return text
# Whitespace used to indent all lines except the first line.
indent = ' ' * len(prefix) if indent is None else indent
indent_only = (prefix == '' and indent is not None)
if func:
lines = [prefix]
for part in text.split(', '):
if part[-1] not in ');':
part += ', '
if len(lines[-1]) + len(part) > width:
lines.append(indent)
lines[-1] += part
return '\n'.join(x.rstrip() for x in lines).rstrip()
# XXX: Dummy prefix to force TextWrapper() to wrap the first line.
if indent_only:
prefix = indent
tw = textwrap.TextWrapper(break_long_words=False,
break_on_hyphens=False,
width=width,
initial_indent=prefix,
subsequent_indent=indent)
result = '\n'.join(tw.wrap(text.strip()))
# XXX: Remove the dummy prefix.
if indent_only:
result = result[len(indent):]
return result
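# For example, in `func` mode text wraps only at commas (illustrative only):
#   >>> doc_wrap('a, b, c)', prefix='f(', width=8, func=True)
#   'f(a, b,\n  c)'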
def max_name(names):
if len(names) == 0:
return 0
return max(len(name) for name in names)
def update_params_map(parent, ret_map, width=62):
"""Updates `ret_map` with name:desc key-value pairs extracted
from Doxygen XML node `parent`.
"""
params = collections.OrderedDict()
for node in parent.childNodes:
if node.nodeType == node.TEXT_NODE:
continue
name_node = find_first(node, 'parametername')
if name_node.getAttribute('direction') == 'out':
continue
name = get_text(name_node)
if name in param_exclude:
continue
params[name.strip()] = node
max_name_len = max_name(params.keys()) + 8
# `ret_map` is a name:desc map.
for name, node in params.items():
desc = ''
desc_node = get_child(node, 'parameterdescription')
if desc_node:
desc = fmt_node_as_vimhelp(
desc_node, width=width, indent=(' ' * max_name_len))
ret_map[name] = desc
return ret_map
def render_node(n, text, prefix='', indent='', width=62, fmt_vimhelp=False):
"""Renders a node as Vim help text, recursively traversing all descendants."""
def ind(s):
return s if fmt_vimhelp else ''
text = ''
# space_preceding = (len(text) > 0 and ' ' == text[-1][-1])
# text += (int(not space_preceding) * ' ')
if n.nodeName == 'preformatted':
o = get_text(n, preformatted=True)
ensure_nl = '' if o[-1] == '\n' else '\n'
text += '>{}{}\n<'.format(ensure_nl, o)
elif is_inline(n):
text = doc_wrap(get_text(n), indent=indent, width=width)
elif n.nodeName == 'verbatim':
# TODO: currently we don't use this. The "[verbatim]" hint is there as
# a reminder that we must decide how to format this if we do use it.
text += ' [verbatim] {}'.format(get_text(n))
elif n.nodeName == 'listitem':
for c in n.childNodes:
result = render_node(
c,
text,
indent=indent + (' ' * len(prefix)),
width=width
)
if is_blank(result):
continue
text += indent + prefix + result
elif n.nodeName in ('para', 'heading'):
for c in n.childNodes:
if (is_inline(c)
and '' != get_text(c).strip()
and text
and ' ' != text[-1]):
text += ' '
text += render_node(c, text, indent=indent, width=width)
elif n.nodeName == 'itemizedlist':
for c in n.childNodes:
text += '{}\n'.format(render_node(c, text, prefix='• ',
indent=indent, width=width))
elif n.nodeName == 'orderedlist':
i = 1
for c in n.childNodes:
if is_blank(get_text(c)):
text += '\n'
continue
text += '{}\n'.format(render_node(c, text, prefix='{}. '.format(i),
indent=indent, width=width))
i = i + 1
elif n.nodeName == 'simplesect' and 'note' == n.getAttribute('kind'):
text += '\nNote:\n '
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
text += '\n'
elif n.nodeName == 'simplesect' and 'warning' == n.getAttribute('kind'):
text += 'Warning:\n '
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
text += '\n'
elif (n.nodeName == 'simplesect'
and n.getAttribute('kind') in ('return', 'see')):
text += ind(' ')
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
elif n.nodeName == 'computeroutput':
return get_text(n)
else:
raise RuntimeError('unhandled node type: {}\n{}'.format(
n.nodeName, n.toprettyxml(indent=' ', newl='\n')))
return text
def para_as_map(parent, indent='', width=62, fmt_vimhelp=False):
"""Extracts a Doxygen XML <para> node to a map.
Keys:
'text': Text from this <para> element
'params': <parameterlist> map
'return': List of @return strings
'seealso': List of @see strings
'xrefs': ?
"""
chunks = {
'text': '',
'params': collections.OrderedDict(),
'return': [],
'seealso': [],
'xrefs': []
}
# Ordered dict of ordered lists.
groups = collections.OrderedDict([
('params', []),
('return', []),
('seealso', []),
('xrefs', []),
])
# Gather nodes into groups. Mostly this is because we want "parameterlist"
# nodes to appear together.
text = ''
kind = ''
last = ''
if is_inline(parent):
# Flatten inline text from a tree of non-block nodes.
text = doc_wrap(render_node(parent, "", fmt_vimhelp=fmt_vimhelp),
indent=indent, width=width)
else:
prev = None # Previous node
for child in parent.childNodes:
if child.nodeName == 'parameterlist':
groups['params'].append(child)
elif child.nodeName == 'xrefsect':
groups['xrefs'].append(child)
elif child.nodeName == 'simplesect':
last = kind
kind = child.getAttribute('kind')
if kind == 'return' or (kind == 'note' and last == 'return'):
groups['return'].append(child)
elif kind == 'see':
groups['seealso'].append(child)
elif kind in ('note', 'warning'):
text += render_node(child, text, indent=indent,
width=width, fmt_vimhelp=fmt_vimhelp)
else:
raise RuntimeError('unhandled simplesect: {}\n{}'.format(
child.nodeName, child.toprettyxml(indent=' ', newl='\n')))
else:
if (prev is not None
and is_inline(self_or_child(prev))
and is_inline(self_or_child(child))
and '' != get_text(self_or_child(child)).strip()
and text
and ' ' != text[-1]):
text += ' '
text += render_node(child, text, indent=indent, width=width,
fmt_vimhelp=fmt_vimhelp)
prev = child
chunks['text'] += text
# Generate map from the gathered items.
if len(groups['params']) > 0:
for child in groups['params']:
update_params_map(child, ret_map=chunks['params'], width=width)
for child in groups['return']:
chunks['return'].append(render_node(
child, '', indent=indent, width=width, fmt_vimhelp=fmt_vimhelp))
for child in groups['seealso']:
chunks['seealso'].append(render_node(
child, '', indent=indent, width=width, fmt_vimhelp=fmt_vimhelp))
xrefs = set()
for child in groups['xrefs']:
# XXX: Add a space (or any char) to `title` here, otherwise xrefs
# ("Deprecated" section) acts very weird...
title = get_text(get_child(child, 'xreftitle')) + ' '
xrefs.add(title)
xrefdesc = get_text(get_child(child, 'xrefdescription'))
chunks['xrefs'].append(doc_wrap(xrefdesc, prefix='{}: '.format(title),
width=width) + '\n')
return chunks, xrefs
def fmt_node_as_vimhelp(parent, width=62, indent='', fmt_vimhelp=False):
"""Renders (nested) Doxygen <para> nodes as Vim :help text.
NB: Blank lines in a docstring manifest as <para> tags.
"""
rendered_blocks = []
def fmt_param_doc(m):
"""Renders a params map as Vim :help text."""
max_name_len = max_name(m.keys()) + 4
out = ''
for name, desc in m.items():
name = ' {}'.format('{{{}}}'.format(name).ljust(max_name_len))
out += '{}{}\n'.format(name, desc)
return out.rstrip()
    def has_nonexcluded_params(m):
        """Returns True if any of the given params has at least
        one non-excluded item."""
        return fmt_param_doc(m) != ''
for child in parent.childNodes:
para, _ = para_as_map(child, indent, width, fmt_vimhelp)
# Generate text from the gathered items.
chunks = [para['text']]
if len(para['params']) > 0 and has_nonexcluded_params(para['params']):
chunks.append('\nParameters: ~')
chunks.append(fmt_param_doc(para['params']))
if len(para['return']) > 0:
chunks.append('\nReturn: ~')
for s in para['return']:
chunks.append(s)
if len(para['seealso']) > 0:
chunks.append('\nSee also: ~')
for s in para['seealso']:
chunks.append(s)
for s in para['xrefs']:
chunks.append(s)
rendered_blocks.append(clean_lines('\n'.join(chunks).strip()))
rendered_blocks.append('')
return clean_lines('\n'.join(rendered_blocks).strip())
def extract_from_xml(filename, target, width, fmt_vimhelp):
"""Extracts Doxygen info as maps without formatting the text.
Returns two maps:
1. Functions
2. Deprecated functions
The `fmt_vimhelp` variable controls some special cases for use by
fmt_doxygen_xml_as_vimhelp(). (TODO: ugly :)
"""
fns = {} # Map of func_name:docstring.
deprecated_fns = {} # Map of func_name:docstring.
dom = minidom.parse(filename)
compoundname = get_text(dom.getElementsByTagName('compoundname')[0])
for member in dom.getElementsByTagName('memberdef'):
if member.getAttribute('static') == 'yes' or \
member.getAttribute('kind') != 'function' or \
member.getAttribute('prot') == 'private' or \
get_text(get_child(member, 'name')).startswith('_'):
continue
loc = find_first(member, 'location')
if 'private' in loc.getAttribute('file'):
continue
return_type = get_text(get_child(member, 'type'))
if return_type == '':
continue
if return_type.startswith(('ArrayOf', 'DictionaryOf')):
parts = return_type.strip('_').split('_')
return_type = '{}({})'.format(parts[0], ', '.join(parts[1:]))
name = get_text(get_child(member, 'name'))
annotations = get_text(get_child(member, 'argsstring'))
if annotations and ')' in annotations:
annotations = annotations.rsplit(')', 1)[-1].strip()
# XXX: (doxygen 1.8.11) 'argsstring' only includes attributes of
# non-void functions. Special-case void functions here.
if name == 'nvim_get_mode' and len(annotations) == 0:
annotations += 'FUNC_API_FAST'
annotations = filter(None, map(lambda x: annotation_map.get(x),
annotations.split()))
params = []
type_length = 0
for param in iter_children(member, 'param'):
param_type = get_text(get_child(param, 'type')).strip()
param_name = ''
declname = get_child(param, 'declname')
if declname:
param_name = get_text(declname).strip()
elif CONFIG[target]['mode'] == 'lua':
# XXX: this is what lua2dox gives us...
param_name = param_type
param_type = ''
if param_name in param_exclude:
continue
if fmt_vimhelp and param_type.endswith('*'):
param_type = param_type.strip('* ')
param_name = '*' + param_name
type_length = max(type_length, len(param_type))
params.append((param_type, param_name))
# Handle Object Oriented style functions here.
# We make sure they have "self" in the parameters,
# and a parent function
if return_type.startswith('function') \
and len(return_type.split(' ')) >= 2 \
and any(x[1] == 'self' for x in params):
split_return = return_type.split(' ')
name = f'{split_return[1]}:{name}'
c_args = []
for param_type, param_name in params:
c_args.append((' ' if fmt_vimhelp else '') + (
'%s %s' % (param_type.ljust(type_length), param_name)).strip())
if not fmt_vimhelp:
pass
else:
fstem = '?'
if '.' in compoundname:
fstem = compoundname.split('.')[0]
fstem = CONFIG[target]['module_override'].get(fstem, fstem)
vimtag = CONFIG[target]['fn_helptag_fmt'](fstem, name)
prefix = '%s(' % name
suffix = '%s)' % ', '.join('{%s}' % a[1] for a in params
if a[0] not in ('void', 'Error'))
if not fmt_vimhelp:
c_decl = '%s %s(%s);' % (return_type, name, ', '.join(c_args))
signature = prefix + suffix
else:
c_decl = textwrap.indent('%s %s(\n%s\n);' % (return_type, name,
',\n'.join(c_args)),
' ')
# Minimum 8 chars between signature and vimtag
lhs = (width - 8) - len(vimtag)
if len(prefix) + len(suffix) > lhs:
signature = vimtag.rjust(width) + '\n'
signature += doc_wrap(suffix, width=width, prefix=prefix,
func=True)
else:
signature = prefix + suffix
signature += vimtag.rjust(width - len(signature))
# Tracks `xrefsect` titles. As of this writing, used only for separating
# deprecated functions.
xrefs_all = set()
paras = []
brief_desc = find_first(member, 'briefdescription')
if brief_desc:
for child in brief_desc.childNodes:
para, xrefs = para_as_map(child)
xrefs_all.update(xrefs)
desc = find_first(member, 'detaileddescription')
if desc:
for child in desc.childNodes:
para, xrefs = para_as_map(child)
paras.append(para)
xrefs_all.update(xrefs)
log.debug(
textwrap.indent(
re.sub(r'\n\s*\n+', '\n',
desc.toprettyxml(indent=' ', newl='\n')), ' ' * 16))
fn = {
'annotations': list(annotations),
'signature': signature,
'parameters': params,
'parameters_doc': collections.OrderedDict(),
'doc': [],
'return': [],
'seealso': [],
}
if fmt_vimhelp:
fn['desc_node'] = desc
fn['brief_desc_node'] = brief_desc
for m in paras:
if 'text' in m:
if not m['text'] == '':
fn['doc'].append(m['text'])
if 'params' in m:
# Merge OrderedDicts.
fn['parameters_doc'].update(m['params'])
if 'return' in m and len(m['return']) > 0:
fn['return'] += m['return']
if 'seealso' in m and len(m['seealso']) > 0:
fn['seealso'] += m['seealso']
if INCLUDE_C_DECL:
fn['c_decl'] = c_decl
if 'Deprecated' in str(xrefs_all):
deprecated_fns[name] = fn
elif name.startswith(CONFIG[target]['fn_name_prefix']):
fns[name] = fn
fns = collections.OrderedDict(sorted(
fns.items(),
key=lambda key_item_tuple: key_item_tuple[0].lower()))
deprecated_fns = collections.OrderedDict(sorted(deprecated_fns.items()))
return fns, deprecated_fns
def fmt_doxygen_xml_as_vimhelp(filename, target):
"""Entrypoint for generating Vim :help from from Doxygen XML.
Returns 3 items:
1. Vim help text for functions found in `filename`.
2. Vim help text for deprecated functions.
"""
fns_txt = {} # Map of func_name:vim-help-text.
deprecated_fns_txt = {} # Map of func_name:vim-help-text.
fns, _ = extract_from_xml(filename, target, text_width, True)
for name, fn in fns.items():
# Generate Vim :help for parameters.
if fn['desc_node']:
doc = fmt_node_as_vimhelp(fn['desc_node'], fmt_vimhelp=True)
if not doc and fn['brief_desc_node']:
doc = fmt_node_as_vimhelp(fn['brief_desc_node'])
if not doc:
doc = 'TODO: Documentation'
annotations = '\n'.join(fn['annotations'])
if annotations:
annotations = ('\n\nAttributes: ~\n' +
textwrap.indent(annotations, ' '))
i = doc.rfind('Parameters: ~')
if i == -1:
doc += annotations
else:
doc = doc[:i] + annotations + '\n\n' + doc[i:]
if INCLUDE_C_DECL:
doc += '\n\nC Declaration: ~\n>\n'
doc += fn['c_decl']
doc += '\n<'
func_doc = fn['signature'] + '\n'
func_doc += textwrap.indent(clean_lines(doc), ' ' * 16)
# Verbatim handling.
func_doc = re.sub(r'^\s+([<>])$', r'\1', func_doc, flags=re.M)
split_lines = func_doc.split('\n')
start = 0
while True:
try:
start = split_lines.index('>', start)
except ValueError:
break
try:
end = split_lines.index('<', start)
except ValueError:
break
split_lines[start + 1:end] = [
(' ' + x).rstrip()
for x in textwrap.dedent(
"\n".join(
split_lines[start+1:end]
)
).split("\n")
]
start = end
func_doc = "\n".join(split_lines)
if name.startswith(CONFIG[target]['fn_name_prefix']):
fns_txt[name] = func_doc
return ('\n\n'.join(list(fns_txt.values())),
'\n\n'.join(list(deprecated_fns_txt.values())))
def delete_lines_below(filename, tokenstr):
"""Deletes all lines below the line containing `tokenstr`, the line itself,
and one line above it.
"""
    with open(filename) as fp:
        lines = fp.readlines()
i = 0
found = False
for i, line in enumerate(lines, 1):
if tokenstr in line:
found = True
break
if not found:
raise RuntimeError(f'not found: "{tokenstr}"')
i = max(0, i - 2)
with open(filename, 'wt') as fp:
fp.writelines(lines[0:i])
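# For example, if `tokenstr` first appears on line 10 of the file, lines 1-8
# are kept and lines 9 through the end of the file are deleted.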
def main(config, args):
"""Generates:
1. Vim :help docs
2. *.mpack files for use by API clients
Doxygen is called and configured through stdin.
"""
for target in CONFIG:
if args.target is not None and target != args.target:
continue
mpack_file = os.path.join(
base_dir, 'runtime', 'doc',
CONFIG[target]['filename'].replace('.txt', '.mpack'))
if os.path.exists(mpack_file):
os.remove(mpack_file)
output_dir = out_dir.format(target=target)
log.info("Generating documentation for %s in folder %s",
target, output_dir)
debug = args.log_level >= logging.DEBUG
p = subprocess.Popen(
['doxygen', '-'],
stdin=subprocess.PIPE,
# silence warnings
# runtime/lua/vim/lsp.lua:209: warning: argument 'foo' not found
stderr=(subprocess.STDOUT if debug else subprocess.DEVNULL))
p.communicate(
config.format(
input=' '.join(
[f'"{file}"' for file in CONFIG[target]['files']]),
output=output_dir,
filter=filter_cmd,
file_patterns=CONFIG[target]['file_patterns'])
.encode('utf8')
)
if p.returncode:
sys.exit(p.returncode)
fn_map_full = {} # Collects all functions as each module is processed.
sections = {}
intros = {}
sep = '=' * text_width
base = os.path.join(output_dir, 'xml')
dom = minidom.parse(os.path.join(base, 'index.xml'))
# generate docs for section intros
for compound in dom.getElementsByTagName('compound'):
if compound.getAttribute('kind') != 'group':
continue
groupname = get_text(find_first(compound, 'name'))
groupxml = os.path.join(base, '%s.xml' %
compound.getAttribute('refid'))
group_parsed = minidom.parse(groupxml)
doc_list = []
brief_desc = find_first(group_parsed, 'briefdescription')
if brief_desc:
for child in brief_desc.childNodes:
doc_list.append(fmt_node_as_vimhelp(child))
desc = find_first(group_parsed, 'detaileddescription')
if desc:
doc = fmt_node_as_vimhelp(desc)
if doc:
doc_list.append(doc)
intros[groupname] = "\n".join(doc_list)
for compound in dom.getElementsByTagName('compound'):
if compound.getAttribute('kind') != 'file':
continue
filename = get_text(find_first(compound, 'name'))
if filename.endswith('.c') or filename.endswith('.lua'):
xmlfile = os.path.join(base,
'{}.xml'.format(compound.getAttribute('refid')))
# Extract unformatted (*.mpack).
fn_map, _ = extract_from_xml(xmlfile, target, 9999, False)
# Extract formatted (:help).
functions_text, deprecated_text = fmt_doxygen_xml_as_vimhelp(
os.path.join(base, '{}.xml'.format(
compound.getAttribute('refid'))), target)
if not functions_text and not deprecated_text:
continue
else:
name = os.path.splitext(
os.path.basename(filename))[0].lower()
sectname = name.upper() if name == 'ui' else name.title()
doc = ''
intro = intros.get(f'api-{name}')
if intro:
doc += '\n\n' + intro
if functions_text:
doc += '\n\n' + functions_text
if INCLUDE_DEPRECATED and deprecated_text:
doc += f'\n\n\nDeprecated {sectname} Functions: ~\n\n'
doc += deprecated_text
if doc:
filename = os.path.basename(filename)
sectname = CONFIG[target]['section_name'].get(
filename, sectname)
title = CONFIG[target]['section_fmt'](sectname)
helptag = CONFIG[target]['helptag_fmt'](sectname)
sections[filename] = (title, helptag, doc)
fn_map_full.update(fn_map)
if len(sections) == 0:
fail(f'no sections for target: {target}')
if len(sections) > len(CONFIG[target]['section_order']):
raise RuntimeError(
'found new modules "{}"; update the "section_order" map'.format(
set(sections).difference(CONFIG[target]['section_order'])))
first_section_tag = sections[CONFIG[target]['section_order'][0]][1]
docs = ''
i = 0
for filename in CONFIG[target]['section_order']:
try:
title, helptag, section_doc = sections.pop(filename)
except KeyError:
msg(f'warning: empty docs, skipping (target={target}): {filename}')
msg(f' existing docs: {sections.keys()}')
continue
i += 1
if filename not in CONFIG[target]['append_only']:
docs += sep
docs += '\n%s%s' % (title,
helptag.rjust(text_width - len(title)))
docs += section_doc
docs += '\n\n\n'
docs = docs.rstrip() + '\n\n'
docs += ' vim:tw=78:ts=8:ft=help:norl:\n'
doc_file = os.path.join(base_dir, 'runtime', 'doc',
CONFIG[target]['filename'])
if os.path.exists(doc_file):
delete_lines_below(doc_file, first_section_tag)
with open(doc_file, 'ab') as fp:
fp.write(docs.encode('utf8'))
fn_map_full = collections.OrderedDict(sorted(fn_map_full.items()))
with open(mpack_file, 'wb') as fp:
fp.write(msgpack.packb(fn_map_full, use_bin_type=True))
if not args.keep_tmpfiles:
shutil.rmtree(output_dir)
msg_report()
def filter_source(filename):
    """Filters the source to fix macros that confuse Doxygen."""
    name, extension = os.path.splitext(filename)
    if extension == '.lua':
        p = subprocess.run([lua2dox_filter, filename], stdout=subprocess.PIPE)
        op = ('?' if 0 != p.returncode else p.stdout.decode('utf-8'))
        print(op)
    else:
        with open(filename, 'rt') as fp:
print(re.sub(r'^(ArrayOf|DictionaryOf)(\(.*?\))',
lambda m: m.group(1)+'_'.join(
re.split(r'[^\w]+', m.group(2))),
fp.read(), flags=re.M))
def parse_args():
targets = ', '.join(CONFIG.keys())
ap = argparse.ArgumentParser(
description="Generate helpdoc from source code")
ap.add_argument(
"--log-level", "-l", choices=LOG_LEVELS.keys(),
default=logging.getLevelName(logging.ERROR), help="Set log verbosity"
)
ap.add_argument('source_filter', nargs='*',
help="Filter source file(s)")
ap.add_argument('-k', '--keep-tmpfiles', action='store_true',
help="Keep temporary files")
ap.add_argument('-t', '--target',
help=f'One of ({targets}), defaults to "all"')
return ap.parse_args()
Doxyfile = textwrap.dedent('''
OUTPUT_DIRECTORY = {output}
INPUT = {input}
INPUT_ENCODING = UTF-8
FILE_PATTERNS = {file_patterns}
RECURSIVE = YES
INPUT_FILTER = "{filter}"
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = */private/* */health.lua */_*.lua
EXCLUDE_SYMBOLS =
EXTENSION_MAPPING = lua=C
EXTRACT_PRIVATE = NO
GENERATE_HTML = NO
GENERATE_DOCSET = NO
GENERATE_HTMLHELP = NO
GENERATE_QHP = NO
GENERATE_TREEVIEW = NO
GENERATE_LATEX = NO
GENERATE_RTF = NO
GENERATE_MAN = NO
GENERATE_DOCBOOK = NO
GENERATE_AUTOGEN_DEF = NO
GENERATE_XML = YES
XML_OUTPUT = xml
XML_PROGRAMLISTING = NO
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = NO
MARKDOWN_SUPPORT = YES
''')
if __name__ == "__main__":
args = parse_args()
print("Setting log level to %s" % args.log_level)
args.log_level = LOG_LEVELS[args.log_level]
log.setLevel(args.log_level)
log.addHandler(logging.StreamHandler())
if len(args.source_filter) > 0:
filter_source(args.source_filter[0])
else:
main(Doxyfile, args)
# vim: set ft=python ts=4 sw=4 tw=79 et :
lambda m: m.group(1)+'_'.join(
re.split(r'[^\w]+', m.group(2))),
fp.read(), flags=re.M))
def parse_args():
targets = ', '.join(CONFIG.keys())
ap = argparse.ArgumentParser(
description="Generate helpdoc from source code")
ap.add_argument(
"--log-level", "-l", choices=LOG_LEVELS.keys(),
default=logging.getLevelName(logging.ERROR), help="Set log verbosity"
)
ap.add_argument('source_filter', nargs='*',
help="Filter source file(s)")
ap.add_argument('-k', '--keep-tmpfiles', action='store_true',
help="Keep temporary files")
ap.add_argument('-t', '--target',
help=f'One of ({targets}), defaults to "all"')
return ap.parse_args()
Doxyfile = textwrap.dedent('''
OUTPUT_DIRECTORY = {output}
INPUT = {input}
INPUT_ENCODING = UTF-8
FILE_PATTERNS = {file_patterns}
RECURSIVE = YES
INPUT_FILTER = "{filter}"
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = */private/* */health.lua */_*.lua
EXCLUDE_SYMBOLS =
EXTENSION_MAPPING = lua=C
EXTRACT_PRIVATE = NO
GENERATE_HTML = NO
GENERATE_DOCSET = NO
GENERATE_HTMLHELP = NO
GENERATE_QHP = NO
GENERATE_TREEVIEW = NO
GENERATE_LATEX = NO
GENERATE_RTF = NO
GENERATE_MAN = NO
GENERATE_DOCBOOK = NO
GENERATE_AUTOGEN_DEF = NO
GENERATE_XML = YES
XML_OUTPUT = xml
XML_PROGRAMLISTING = NO
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = NO
MARKDOWN_SUPPORT = YES
''')
if __name__ == "__main__":
args = parse_args()
print("Setting log level to %s" % args.log_level)
args.log_level = LOG_LEVELS[args.log_level]
log.setLevel(args.log_level)
log.addHandler(logging.StreamHandler())
if len(args.source_filter) > 0:
filter_source(args.source_filter[0])
else:
main(Doxyfile, args)
# vim: set ft=python ts=4 sw=4 tw=79 et :
| true
| true
|
790c2c3229b665e0ef3409a0e34586e0d60b9d38
| 2,637
|
py
|
Python
|
2.1 Weather/opscentretools/plotting.py
|
met-office-lab/example-notebooks
|
11bda2174f2c8f5060bb3cb630f99f2629c07346
|
[
"BSD-3-Clause"
] | 1
|
2020-07-31T21:07:44.000Z
|
2020-07-31T21:07:44.000Z
|
2.1 Weather/opscentretools/plotting.py
|
met-office-lab/example-notebooks
|
11bda2174f2c8f5060bb3cb630f99f2629c07346
|
[
"BSD-3-Clause"
] | 2
|
2016-09-12T11:11:17.000Z
|
2016-10-14T16:23:48.000Z
|
2.1 Weather/opscentretools/plotting.py
|
met-office-lab/example-notebooks
|
11bda2174f2c8f5060bb3cb630f99f2629c07346
|
[
"BSD-3-Clause"
] | 1
|
2016-09-08T13:53:47.000Z
|
2016-09-08T13:53:47.000Z
|
import holoviews as hv
import geoviews as gv
import cartopy.crs as ccrs
import cartopy.feature as cf
from holoviews.operation.datashader import regrid
from holoviews.streams import FreehandDraw
import panel as pn
pn.extension()
hv.extension('bokeh', logo=False)
import sys
# Suppress warnings
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
def interactive_plot(cube, cmap='viridis', kdims=['longitude', 'latitude'], coastlines=False, coastline_color='pink', projection=ccrs.PlateCarree, tools=['hover'], min_height=600, **opts):
# Generate an interactive Bokeh image of a cube with various plotting options
# Convert cube to GeoViews dataset
dataset = gv.Dataset(cube, [coord.name() for coord in cube.dim_coords], label=cube.name())
# Generate an image object which will dynamically render as the interactive view changes
image = regrid(dataset.to(gv.Image, kdims, dynamic=True))
# Options for plotting
options = {
'cmap': cmap,
'responsive': True,
'projection': projection(),
'colorbar': True,
'min_height': min_height,
'aspect': 2,
'tools': tools
}
# Include coastlines if needed
if coastlines:
return gv.feature.ocean * gv.feature.land * image.opts(**options, **opts) * gv.feature.coastline.opts(line_color=coastline_color)
else:
return image.opts(**options, **opts)
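# A minimal usage sketch, assuming an Iris-style cube loaded elsewhere
# (the filename below is illustrative only):
#
#   import iris
#   cube = iris.load_cube('air_temperature.pp')
#   interactive_plot(cube, cmap='plasma', coastlines=True)
#
# The returned overlay renders directly in a Jupyter notebook cell.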
def dashboard_column(plots, shared_slider=False):
# Generate a Panel dashboard from a list of interactive plots
# Create a Panel object to host our plots
app = pn.GridSpec(sizing_mode='stretch_both')
# Arrange plots in a column
column = pn.Column(*plots)
# Add plots and sliders to Panel app
if shared_slider:
# Link all the sliders to one slider
# TODO: Add check for whether sliders can be linked
slider1 = column[0][1][0]
for plot in column[1:]:
slider = plot[1][0]
slider1.link(slider, value='value')
# Append all the plots to the app (using 3/4 of the horizontal space)
for i, plot in enumerate(column):
app[i, 0:4] = plot[0]
# Add the linked slider (using the last 1/4 of the horizontal space)
app[0, 4] = slider1
else:
# Append whole column (with individual sliders) to the app
app[0, 0] = column
return app
def warning_tool(color="orange"):
warning = gv.Polygons([]).opts(line_color=color, line_width=3, fill_color=color, fill_alpha=0.2)
pen = FreehandDraw(source=warning)
return pen, warning
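# Usage sketch for warning_tool() (hypothetical names): overlay the
# returned polygons on a plot and draw warnings freehand; the linked
# FreehandDraw stream then carries the drawn geometry, per the
# holoviews streams API.
#
#   pen, warning = warning_tool(color='red')
#   interactive_plot(cube) * warning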
| 35.16
| 189
| 0.662874
|
import holoviews as hv
import geoviews as gv
import cartopy.crs as ccrs
import cartopy.feature as cf
from holoviews.operation.datashader import regrid
from holoviews.streams import FreehandDraw
import panel as pn
pn.extension()
hv.extension('bokeh', logo=False)
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
def interactive_plot(cube, cmap='viridis', kdims=['longitude', 'latitude'], coastlines=False, coastline_color='pink', projection=ccrs.PlateCarree, tools=['hover'], min_height=600, **opts):
dataset = gv.Dataset(cube, [coord.name() for coord in cube.dim_coords], label=cube.name())
image = regrid(dataset.to(gv.Image, kdims, dynamic=True))
options = {
'cmap': cmap,
'responsive': True,
'projection': projection(),
'colorbar': True,
'min_height': min_height,
'aspect': 2,
'tools': tools
}
if coastlines:
return gv.feature.ocean * gv.feature.land * image.opts(**options, **opts) * gv.feature.coastline.opts(line_color=coastline_color)
else:
return image.opts(**options, **opts)
def dashboard_column(plots, shared_slider=False):
app = pn.GridSpec(sizing_mode='stretch_both')
column = pn.Column(*plots)
if shared_slider:
slider1 = column[0][1][0]
for plot in column[1:]:
slider = plot[1][0]
slider1.link(slider, value='value')
for i, plot in enumerate(column):
app[i, 0:4] = plot[0]
app[0, 4] = slider1
else:
app[0, 0] = column
return app
def warning_tool(color="orange"):
warning = gv.Polygons([]).opts(line_color=color, line_width=3, fill_color=color, fill_alpha=0.2)
pen = FreehandDraw(source=warning)
return pen, warning
| true
| true
|
790c2cce405f1d6385db0505044b1080ad3ad2b1
| 88,542
|
py
|
Python
|
python/avi/migrationtools/netscaler_converter/ns_util.py
|
thisisshi/sdk
|
99c52caffeebbfd41f43931fea2b5b1323841892
|
[
"Apache-2.0"
] | 37
|
2016-03-14T22:27:17.000Z
|
2022-03-03T05:18:39.000Z
|
python/avi/migrationtools/netscaler_converter/ns_util.py
|
thisisshi/sdk
|
99c52caffeebbfd41f43931fea2b5b1323841892
|
[
"Apache-2.0"
] | 195
|
2016-03-14T23:47:55.000Z
|
2021-05-12T11:28:56.000Z
|
python/avi/migrationtools/netscaler_converter/ns_util.py
|
thisisshi/sdk
|
99c52caffeebbfd41f43931fea2b5b1323841892
|
[
"Apache-2.0"
] | 50
|
2016-03-14T05:52:14.000Z
|
2022-01-06T06:12:00.000Z
|
import csv
import logging
import os
import copy
import re
import random
from functools import reduce
import ast
import pandas
import pexpect
import avi.migrationtools.netscaler_converter.ns_constants as ns_constants
from pkg_resources import parse_version
from xlsxwriter import Workbook
from openpyxl import load_workbook
from urllib.parse import urlparse
from OpenSSL import crypto
from socket import gethostname
from avi.migrationtools.netscaler_converter.ns_constants \
import (STATUS_SKIPPED, STATUS_SUCCESSFUL, STATUS_INDIRECT,
STATUS_NOT_APPLICABLE, STATUS_PARTIAL, STATUS_DATASCRIPT,
STATUS_INCOMPLETE_CONFIGURATION, STATUS_COMMAND_NOT_SUPPORTED,
OBJECT_TYPE_POOL_GROUP, OBJECT_TYPE_POOL, STATUS_NOT_IN_USE,
OBJECT_TYPE_HTTP_POLICY_SET, STATUS_LIST, COMPLEXITY_ADVANCED,
COMPLEXITY_BASIC, OBJECT_TYPE_APPLICATION_PERSISTENCE_PROFILE,
OBJECT_TYPE_APPLICATION_PROFILE)
from avi.migrationtools.avi_migration_utils import MigrationUtil, update_count
LOG = logging.getLogger(__name__)
csv_writer_dict_list = []
skipped_setting = {
# 'virtual_service': '',
# 'ssl key and cert': {},
# 'ssl profile': {},
# 'pool group': {},
# 'health monitor': {},
# 'Httppolicy': {}
}
# Variables for tracking progress and the overall object count.
progressbar_count = 0
total_count = 0
class NsUtil(MigrationUtil):
def add_conv_status(self, line_no, cmd, object_type, full_command, conv_status,
avi_object=None):
"""
        Adds a status row in the conversion status csv
        :param line_no: line number of command
        :param cmd: netscaler command
        :param object_type: name of the object
        :param full_command: full netscaler command
        :param conv_status: dict of conversion status
        :param avi_object: converted avi object
"""
row = {
'Line Number': line_no if line_no else '',
'Netscaler Command': cmd if cmd else '',
'Object Name': object_type if object_type else '',
'Full Command': full_command if full_command else '',
'Status': conv_status.get('status', ''),
'Skipped settings': str(conv_status.get('skipped', '')),
'Indirect mapping': str(conv_status.get('indirect', '')),
'Not Applicable': str(conv_status.get('na_list', '')),
'User Ignored': str(conv_status.get('user_ignore', '')),
'AVI Object': str(avi_object) if avi_object else ''
}
csv_writer_dict_list.append(row)
def add_complete_conv_status(self, ns_config, output_dir, avi_config,
report_name, vs_level_status):
"""
        Adds the remaining status rows and writes the complete conversion status report
:param ns_config: NS config dict
:param output_dir: output directory
:param avi_config: AVI config dict
:param report_name: name of report
:param vs_level_status: add vs level details in XL sheet
"""
global csv_writer_dict_list
global progressbar_count
global total_count
print("Generating Report For Converted Configuration...")
ptotal = len(ns_config)
ppcount = 0
for config_key in ns_config:
# increment progressbar count
ppcount += 1
config_object = ns_config[config_key]
msg = "Generating report"
self.print_progress_bar(ppcount, ptotal, msg, prefix='Progress',
suffix='')
for element_key in config_object:
element_object_list = config_object[element_key]
if isinstance(element_object_list, dict):
element_object_list = [element_object_list]
for element_object in element_object_list:
match = [match for match in csv_writer_dict_list if
match['Line Number'] == element_object['line_no']]
if not match:
ns_complete_command = self.get_netscalar_full_command(
config_key, element_object)
# Add status incomplete configuration
self.add_status_row(
element_object['line_no'], config_key,
element_object['attrs'][0], ns_complete_command,
STATUS_INCOMPLETE_CONFIGURATION)
unique_line_number_list = set()
row_list = []
for dict_row in csv_writer_dict_list:
if dict_row['Line Number'] not in unique_line_number_list:
unique_line_number_list.add(dict_row['Line Number'])
row_list.append(dict_row)
else:
row = [row for row in row_list
if row['Line Number'] == dict_row['Line Number']]
if str(dict_row['AVI Object']).startswith('Skipped'):
continue
if dict_row.get('AVI Object', None):
# Added condition to check unique status.
if str(row[0]['AVI Object']) != str(dict_row['AVI Object']):
row[0]['AVI Object'] += '__/__%s' % dict_row[
'AVI Object']
for status in STATUS_LIST:
status_list = [row for row in row_list if
row['Status'] == status]
print('%s: %s' % (status, len(status_list)))
# add skipped list of each object at vs level
print("Writing Excel Sheet For Converted Configuration...")
total_count = total_count + len(row_list)
if vs_level_status:
self.vs_per_skipped_setting_for_references(avi_config)
self.correct_vs_ref(avi_config)
else:
# Call to calculate vs complexity
self.vs_complexity_level()
# Write status report and pivot table in xlsx report
self.write_status_report_and_pivot_table_in_xlsx(
row_list, output_dir, report_name, vs_level_status)
def add_status_row(self, line_no, cmd, object_type, full_command, status,
avi_object=None):
"""
        Adds a status row in the conversion status csv
:param line_no:
:param cmd: netscaler command
:param object_type:
:param full_command:
:param status: conversion status
:param avi_object:
"""
global csv_writer_dict_list
row = {
'Line Number': line_no if line_no else '',
'Netscaler Command': cmd,
'Object Name': object_type,
'Full Command': full_command,
'Status': status,
'AVI Object': str(avi_object) if avi_object else ''
}
csv_writer_dict_list.append(row)
def add_csv_headers(self, csv_file):
"""
Adds header line in conversion status file
:param csv_file: File to which header is to be added
"""
global csv_writer
fieldnames = ['Line Number', 'Netscaler Command', 'Object Name',
'Full Command', 'Status', 'Skipped settings',
'Indirect mapping', 'Not Applicable', 'User Ignored',
'AVI Object']
csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames,
lineterminator='\n', )
csv_writer.writeheader()
def get_avi_lb_algorithm(self, ns_algorithm):
"""
Converts NS LB algorithm to equivalent avi LB algorithm
:param ns_algorithm: NS algorithm name
:return: Avi LB algorithm enum value
"""
avi_algorithm = 'LB_ALGORITHM_LEAST_CONNECTIONS'
if ns_algorithm == 'LEASTCONNECTIONS':
avi_algorithm = 'LB_ALGORITHM_LEAST_CONNECTIONS'
elif ns_algorithm == 'ROUNDROBIN':
avi_algorithm = 'LB_ALGORITHM_ROUND_ROBIN'
elif ns_algorithm in ['LEASTRESPONSETIME', 'LRTM']:
avi_algorithm = 'LB_ALGORITHM_FASTEST_RESPONSE'
elif ns_algorithm == 'SOURCEIPHASH':
avi_algorithm = 'LB_ALGORITHM_CONSISTENT_HASH'
elif ns_algorithm == 'URLHASH':
avi_algorithm = 'LB_ALGORITHM_CONSISTENT_HASH_URI'
return avi_algorithm
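    # Illustrative mapping for get_avi_lb_algorithm() (inputs are
    # NetScaler lbMethod values; any unrecognized method falls back to
    # least connections):
    #   'ROUNDROBIN' -> 'LB_ALGORITHM_ROUND_ROBIN'
    #   'LRTM'       -> 'LB_ALGORITHM_FASTEST_RESPONSE'
    #   'URLHASH'    -> 'LB_ALGORITHM_CONSISTENT_HASH_URI'
    #   'TOKEN'      -> 'LB_ALGORITHM_LEAST_CONNECTIONS' (default)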
def update_algo_for_pools(self, algo, pg_name, avi_config):
pool_group = [pg for pg in avi_config['PoolGroup'] if
pg['name'] == pg_name][0]
for member in pool_group['members']:
pool_name = self.get_name(member['pool_ref'])
pool = [pool for pool in avi_config['Pool'] if
pool['name'] == pool_name][0]
pool['lb_algorithm'] = algo
def get_avi_resp_code(self, respCode):
"""
        This function is used for getting the appropriate response codes for avi.
:param respCode: response code
:return: returns list of unique responses.
"""
avi_resp_codes = []
codes = []
for res_code in respCode.split(' '):
if '-' in res_code:
codes.extend(res_code.split('-'))
else:
codes.append(res_code)
for code in codes:
if code and code.strip().isdigit():
# Converted to int.
code = int(code.strip())
if code < 200:
avi_resp_codes.append("HTTP_1XX")
elif code < 300:
avi_resp_codes.append("HTTP_2XX")
elif code < 400:
avi_resp_codes.append("HTTP_3XX")
elif code < 500:
avi_resp_codes.append("HTTP_4XX")
elif code < 600:
avi_resp_codes.append("HTTP_5XX")
# Get the unique dict from list.
avi_resp_codes = list(set(avi_resp_codes))
if not avi_resp_codes:
avi_resp_codes = ["HTTP_ANY"]
return avi_resp_codes
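    # Worked example for get_avi_resp_code() (hypothetical input): the
    # respCode string "200-299 404" expands to codes [200, 299, 404],
    # which bucket into HTTP_2XX and HTTP_4XX, so the result is the
    # unique list ['HTTP_2XX', 'HTTP_4XX'] (order may vary since a set
    # is used); an empty input yields ['HTTP_ANY'].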
def get_conv_status(self, ns_object, skipped_list, na_list, indirect_list,
ignore_for_val=None, indirect_commands=None,
user_ignore_val=[]):
"""
        This function is used for getting status details for a command,
        like skipped or indirect.
:param ns_object: Netscaler parsed config
:param skipped_list: list of skipped commands list.
:param na_list: not applicable commands list.
:param indirect_list: indirect command list
:param ignore_for_val: optional field
:param indirect_commands: indirect commands
:param user_ignore_val: List of user ignore attributes
        :return: returns dict of conversion status.
"""
skipped = [attr for attr in ns_object.keys() if attr in skipped_list]
na = [attr for attr in ns_object.keys() if attr in na_list]
indirect = [attr for attr in ns_object.keys() if attr in indirect_list]
# List of ignore attributes which are present in skipped
user_ignore = [val for val in skipped if val in user_ignore_val]
# Removed the attributes from skipped which are in user ignore list
skipped = [attr for attr in skipped if attr not in user_ignore_val]
if ignore_for_val:
for key in ignore_for_val.keys():
if key not in ns_object:
continue
ns_val = ns_object.get(key)
ignore_val = ignore_for_val.get(key)
if key in skipped and str(ns_val) == str(ignore_val):
skipped.remove(key)
if skipped:
status = STATUS_PARTIAL
else:
status = STATUS_SUCCESSFUL
conv_status = {
'skipped': skipped,
'indirect': indirect,
'na_list': na,
'status': status,
'user_ignore': user_ignore
}
return conv_status
def get_key_cert_obj(self, name, key_file_name, cert_file_name, input_dir):
"""
        :param name: name of ssl cert.
:param key_file_name: key file (ie.pem)
:param cert_file_name: certificate file name
:param input_dir: input directory for certificate file name
:return: returns dict of ssl object
"""
folder_path = input_dir + os.path.sep
key = self.upload_file(folder_path + key_file_name)
cert = self.upload_file(folder_path + cert_file_name)
ssl_kc_obj = None
if key and cert:
cert = {"certificate": cert}
ssl_kc_obj = {
'name': name,
'key': key,
'certificate': cert,
'key_passphrase': ''
}
return ssl_kc_obj
def get_command_from_line(self, line):
"""
This function is used for getting command and line number from conf file.
:param line: line
:return: returns command name and line
"""
cmd = ''
line_no = 0
for member in line:
if 'line_no' in member:
line_no = member[1]
continue
if isinstance(member, str):
cmd += ' %s' % member
else:
cmd += ' -%s' % ' '.join(member)
return cmd, line_no
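    # Sketch of get_command_from_line() on a hypothetical parsed line:
    #   line = ['add', 'lb', 'vserver', ('line_no', 42),
    #           ('persistenceType', 'SOURCEIP')]
    # returns (' add lb vserver -persistenceType SOURCEIP', 42): string
    # members are appended as-is, other members become '-key value'
    # options, and the line_no member is pulled out separately.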
def update_status_for_skipped(self, skipped_cmds):
"""
        :param skipped_cmds: non-converted commands to be classified as
        NA, Indirect, DataScript or NotSupported
:return: None
"""
na_cmds = ns_constants.netscalar_command_status['NotApplicableCommands']
indirect_cmds = ns_constants.netscalar_command_status[
'IndirectCommands']
datascript_cmds = \
ns_constants.netscalar_command_status['DatascriptCommands']
not_supported = ns_constants.netscalar_command_status['NotSupported']
if not skipped_cmds:
return
for cmd in skipped_cmds:
line_no = cmd['line_no']
cmd = cmd['cmd']
cmd = cmd.strip()
for na_cmd in na_cmds:
if cmd.startswith(na_cmd):
# Add status not applicable in csv/report
self.add_status_row(line_no, na_cmd, None, cmd,
STATUS_NOT_APPLICABLE)
break
for id_cmd in indirect_cmds:
if cmd.startswith(id_cmd):
# Add status indirect in csv/report
self.add_status_row(line_no, id_cmd, None, cmd, STATUS_INDIRECT)
break
for datascript_cmd in datascript_cmds:
if cmd.startswith(datascript_cmd):
# Add status datascript in csv/report
self.add_status_row(line_no, datascript_cmd, None, cmd,
STATUS_DATASCRIPT)
break
for not_commands in not_supported:
if cmd.startswith(not_commands):
                    # Add status not supported in csv/report
self.add_status_row(line_no, not_commands, None, cmd,
STATUS_COMMAND_NOT_SUPPORTED)
break
def remove_duplicate_objects(self, obj_type, obj_list):
"""
Remove duplicate objects from list
:param obj_type: Object type
:param obj_list: list of all objects
        :return: returns list which has no duplicate objects
"""
if len(obj_list) == 1:
return obj_list
for source_obj in obj_list:
for index, tmp_obj in enumerate(obj_list):
if tmp_obj["name"] == source_obj["name"]:
continue
src_cp = copy.deepcopy(source_obj)
tmp_cp = copy.deepcopy(tmp_obj)
del src_cp["name"]
if "description" in src_cp:
del src_cp["description"]
del tmp_cp["name"]
if "description" in tmp_cp:
del tmp_cp["description"]
if src_cp.items() == tmp_cp.items():
                    LOG.warning('Removing duplicate %s object : %s'
                                % (obj_type, tmp_obj["name"]))
del obj_list[index]
self.remove_duplicate_objects(obj_type, obj_list)
return obj_list
def cleanup_config(self, config):
"""
This function is used for deleting temp variables created for conversion
:param config: dict type
:return: None
"""
del config
def clone_pool(self, pool_name, cloned_for, avi_config, userprefix=None):
"""
        This function is used for cloning shared pools in netscaler.
:param pool_name: name of pool
:param cloned_for: cloned for
:param avi_config: avi config dict
:param userprefix: prefix for objects
:return: None
"""
pools = [pool for pool in avi_config['Pool'] if
pool['name'] == pool_name]
if pools:
pool_obj = copy.deepcopy(pools[0])
pname = pool_obj['name']
pool_name = re.sub('[:]', '-', '%s-%s' % (pname, cloned_for))
pool_obj['name'] = pool_name
avi_config['Pool'].append(pool_obj)
LOG.info(
"Same pool reference to other object. Clone Pool %s for %s" %
(pool_name, cloned_for))
return pool_obj['name']
return None
def get_vs_if_shared_vip(self, avi_config, controller_version):
"""
This function checks if same vip is used for other vs
:param avi_config: avi config dict
:param controller_version:
:return: None
"""
vs_list = [v for v in avi_config['VirtualService'] if
'port_range_end' in
v['services'][0]]
for vs in vs_list:
# Get the list of vs which shared the same vip
if parse_version(controller_version) >= parse_version('17.1'):
vs_port_list = [int(v['services'][0]['port']) for v in
avi_config['VirtualService']
if v['vsvip_ref'].split('name=')[1].split('-')[0] ==
vs['vsvip_ref'].split('name=')[1].split('-')[0]
and 'port_range_end' not in v['services'][0]]
else:
vs_port_list = [int(v['services'][0]['port']) for v in
avi_config['VirtualService'] if v['ip_address'][
'addr'] == vs['ip_address']['addr'] and
'port_range_end' not in v['services'][0]]
if vs_port_list:
min_port = min(vs_port_list)
max_port = max(vs_port_list)
vs['services'][0]['port_range_end'] = str(min_port - 1)
service = {
'enable_ssl': False,
'port': str(max_port + 1),
'port_range_end': '65535'
}
vs['services'].append(service)
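    # Worked example (hypothetical ports): if a wildcard VS shares its
    # vip with explicit VSes on ports 80 and 443, then min_port=80 and
    # max_port=443, so the wildcard service is clipped to end at port 79
    # and a second service covering 444-65535 is appended, leaving the
    # band in between to the explicit virtual services.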
def add_prop_for_http_profile(self, profile_name, avi_config, sysdict,
prop_dict):
"""
This method adds the additional attribute to application profile
:param profile_name: name of application profile
:param avi_config: avi config dict
:param sysdict: system/baseline config dict
:param prop_dict: property dict
:return:
"""
profile = [p for p in (avi_config['ApplicationProfile'] + sysdict[
'ApplicationProfile']) if p['name'] == profile_name]
if profile:
if prop_dict.get('clttimeout'):
profile[0]['client_header_timeout'] = int(prop_dict[
'clttimeout'])
profile[0]['client_body_timeout'] = int(prop_dict['clttimeout'])
if prop_dict.get('xff_enabled'):
if profile[0].get('http_profile'):
profile[0]['http_profile'].update(
{
'xff_enabled': True,
'xff_alternate_name': 'X-Forwarded-For'
}
)
else:
profile[0].update({'http_profile':
{
'xff_enabled': True,
'xff_alternate_name': 'X-Forwarded-For'
}
})
if profile[0].get('http_profile'):
profile[0]['http_profile'].update(
{
'x_forwarded_proto_enabled': True,
'hsts_enabled': True,
'http_to_https': True,
'httponly_enabled': True,
'hsts_max_age': 365,
'server_side_redirect_to_https': True,
'secure_cookie_enabled': True
}
)
else:
profile[0].update({'http_profile':
{
'x_forwarded_proto_enabled': True,
'hsts_enabled': True,
'http_to_https': True,
'httponly_enabled': True,
'hsts_max_age': 365,
'server_side_redirect_to_https': True,
'secure_cookie_enabled': True
}
})
def object_exist(self, object_type, name, avi_config):
'''
This method returns true if object exists in avi config dict else false
:param object_type:
:param name:
:param avi_config:
:return:
'''
data = avi_config[object_type]
obj_list = [obj for obj in data if obj['name'] == name]
if obj_list:
return True
return False
def is_shared_same_vip(self, vs, cs_vs_list, avi_config, tenant_name,
cloud_name, tenant_ref, cloud_ref,
controller_version, prefix, input_vrf=None):
"""
        This function checks for vs sharing the same vip
:param vs: Name of vs
:param cs_vs_list: List of vs
:param avi_config: avi config dict
:param tenant_name: Name of tenant
:param cloud_name: Name of cloud
:param tenant_ref: Reference of tenant
:param cloud_ref: Reference of cloud
:param controller_version: controller version
:param prefix: prefix for objects
:param input_vrf: VRF name input
:return: None
"""
if parse_version(controller_version) >= parse_version('17.1'):
# Get the list of vs which shared the same vip
shared_vip = [v for v in cs_vs_list if v['vsvip_ref'
].split('name=')[1].split('-')[0] == vs['vsvip_ref'
].split('name=')[1].split('-')[0] and
v['services'][0][
'port'] == vs['services'][0]['port']]
else:
shared_vip = [v for v in cs_vs_list if v['ip_address']['addr'] ==
vs['ip_address']['addr'] and v['services'][0][
'port'] ==
vs['services'][0]['port']]
if input_vrf:
vrf_ref = self.get_object_ref(input_vrf, 'vrfcontext',
cloud_name=cloud_name)
else:
vrf_ref = self.get_object_ref('global', 'vrfcontext',
cloud_name=cloud_name)
if shared_vip:
return True
elif parse_version(controller_version) >= parse_version('17.1'):
vsvip = vs['vsvip_ref'].split('name=')[1].split('-')[0]
self.create_update_vsvip(vsvip, avi_config['VsVip'], tenant_ref,
cloud_ref, prefix=prefix, vrf_ref=vrf_ref)
name = vsvip + '-vsvip'
# Added prefix for objects
if prefix:
name = prefix + '-' + vsvip + '-vsvip'
updated_vsvip_ref = self.get_object_ref(
name, 'vsvip', tenant_name, cloud_name)
vs['vsvip_ref'] = updated_vsvip_ref
def clone_http_policy_set(self, policy, prefix, avi_config, tenant_name,
cloud_name, used_poolgrp_ref, userprefix=None):
"""
        This function clones a pool group reused in a content switching rule
:param policy: name of policy
:param prefix: clone for
:param avi_config: avi config dict
:param tenant_name:
:param cloud_name:
:param used_poolgrp_ref:
:param userprefix: prefix for objects
:return:None
"""
policy_name = policy['name']
clone_policy = copy.deepcopy(policy)
for rule in clone_policy['http_request_policy']['rules']:
if rule.get('switching_action', None) and \
rule['switching_action'].get('pool_group_ref'):
pool_group_ref = \
rule['switching_action']['pool_group_ref'].split('&')[
1].split(
'=')[1]
if pool_group_ref in used_poolgrp_ref:
LOG.debug('Cloned the pool group for policy %s',
policy_name)
pool_group_ref = self.clone_pool_group(
pool_group_ref, policy_name, avi_config, tenant_name,
cloud_name, userprefix=userprefix)
if pool_group_ref:
updated_pool_group_ref = self.get_object_ref(
pool_group_ref, OBJECT_TYPE_POOL_GROUP, tenant_name,
cloud_name)
rule['switching_action']['pool_group_ref'] = \
updated_pool_group_ref
clone_policy['name'] += '-%s-clone' % prefix
return clone_policy
def set_rules_index_for_http_policy_set(self, avi_config):
"""
Update index as per avi protobuf requirements
:param avi_config: avi config dict
:return: None
"""
http_policy_sets = avi_config['HTTPPolicySet']
for http_policy_set in http_policy_sets:
rules = http_policy_set['http_request_policy']['rules']
rules = sorted(rules, key=lambda d: int(d['index']))
for index, rule in enumerate(rules):
rule['index'] = index
def get_netscalar_full_command(self, netscalar_command, obj):
"""
        Generate netscaler command from the parsed dict
:param netscalar_command: name of command
:param obj: object with attributes
:return: Full command
"""
for attr in obj['attrs']:
netscalar_command += ' %s' % attr
for key in obj:
if isinstance(obj[key], list):
continue
if key == 'line_no':
continue
netscalar_command += ' -%s %s' % (key, obj[key])
return netscalar_command
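    # Sketch for get_netscalar_full_command() (hypothetical object):
    #   obj = {'attrs': ['web-vs', 'HTTP'], 'line_no': 10,
    #          'persistenceType': 'SOURCEIP'}
    # with netscalar_command 'add lb vserver' reconstructs
    #   'add lb vserver web-vs HTTP -persistenceType SOURCEIP'
    # (list-valued keys and the internal line_no key are skipped).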
def clone_pool_group(self, pg_name, cloned_for, avi_config, tenant_name,
cloud_name, userprefix=None):
"""
Used for cloning shared pool group.
:param pg_name: pool group name
:param cloned_for: clone for
:param avi_config: avi config dict
:param tenant_name:
:param cloud_name:
:param userprefix: prefix for objects
:return: None
"""
pool_groups = [pg for pg in avi_config['PoolGroup']
if pg['name'] == pg_name]
if pool_groups:
pool_group = copy.deepcopy(pool_groups[0])
pool_group_name = re.sub('[:]', '-',
'%s-%s' % (pg_name, cloned_for))
pool_group['name'] = pool_group_name
for member in pool_group.get('members', []):
pool_ref = self.get_name(member['pool_ref'])
pool_ref = self.clone_pool(pool_ref, cloned_for, avi_config,
userprefix=userprefix)
if pool_ref:
updated_pool_ref = self.get_object_ref(
pool_ref, OBJECT_TYPE_POOL, tenant_name, cloud_name)
member['pool_ref'] = updated_pool_ref
avi_config['PoolGroup'].append(pool_group)
LOG.info(
"Same pool group reference to other object. Clone Pool group "
"%s for %s" % (pg_name, cloned_for))
return pool_group['name']
return None
def remove_http_mon_from_pool(self, avi_config, pool, sysdict):
"""
This function is used for removing http type health monitor from https
vs.
:param avi_config: avi config dict
:param pool: name of pool
:param sysdict: baseline/system config dict
:return: None
"""
if pool:
hm_refs = copy.deepcopy(pool['health_monitor_refs'])
for hm_ref in hm_refs:
hm = [h for h in (sysdict['HealthMonitor'] + avi_config[
'HealthMonitor']) if h['name'] == hm_ref]
if hm and hm[0]['type'] == 'HEALTH_MONITOR_HTTP':
pool['health_monitor_refs'].remove(hm_ref)
                    LOG.warning(
                        'Skipping health monitor reference %s from pool %s '
                        'because its type is HTTP and the VS has an ssl '
                        'profile.' % (hm_ref, pool['name']))
def remove_https_mon_from_pool(self, avi_config, pool, sysdict):
"""
This function is used for removing https type health monitor from http
vs.
:param avi_config: avi config dict
:param pool: name of pool
:param sysdict: baseline/system config dict
:return: None
"""
if pool:
hm_refs = copy.deepcopy(pool['health_monitor_refs'])
for hm_ref in hm_refs:
hm = [h for h in (sysdict['HealthMonitor'] + avi_config[
'HealthMonitor']) if h['name'] == hm_ref]
if hm and hm[0]['type'] == 'HEALTH_MONITOR_HTTPS':
pool['health_monitor_refs'].remove(hm_ref)
                    LOG.warning(
                        'Skipping health monitor reference %s from pool %s '
                        'because its type is HTTPS and the VS has no ssl '
                        'profile.' % (hm_ref, pool['name']))
def update_application_profile(self, profile_name, pki_profile_ref,
tenant_ref, name, avi_config, sysdict):
"""
        Updates the application profile with the pki profile if the
        application profile exists; otherwise creates a new http profile
        with the pki profile
:param profile_name: name of Http profile
:param pki_profile_ref: ref of PKI profile
:param tenant_ref: tenant ref
:param name: name of virtual service
:param avi_config: Dict of AVi config
:param sysdict: baseline/system config
:return: Http profile
"""
try:
if profile_name:
app_profile = [p for p in (sysdict['ApplicationProfile'] +
avi_config['ApplicationProfile']) if
p['name'] ==
profile_name]
if app_profile:
app_profile[0]["http_profile"]['pki_profile_ref'] = \
pki_profile_ref
                LOG.debug('Added PKI profile %s to application profile %s '
                          'successfully' % (
                              pki_profile_ref, profile_name))
else:
app_profile = dict()
app_profile['name'] = name + '-%s-%s' % (
random.randrange(0, 1000),
ns_constants.PLACE_HOLDER_STR)
app_profile['tenant_ref'] = tenant_ref
app_profile['type'] = 'APPLICATION_PROFILE_TYPE_HTTP'
http_profile = dict()
http_profile['connection_multiplexing_enabled'] = False
http_profile['xff_enabled'] = False
# TODO: clientIpHdrExpr conversion to xff_alternate_name
http_profile['websockets_enabled'] = False
http_profile['pki_profile_ref'] = pki_profile_ref
app_profile["http_profile"] = http_profile
avi_config['ApplicationProfile'].append(app_profile)
LOG.debug(
"Conversion completed successfully for httpProfile: %s" %
app_profile['name'])
return app_profile['name']
        except Exception:
            update_count('error')
            LOG.error("Error in conversion of httpProfile", exc_info=True)
def convert_persistance_prof(self, vs, name, tenant_ref):
"""
        Converts the persistence profile of a vs and returns that profile
        :param vs: object of lb vs or pool
        :param name: name of application persistence profile
:param tenant_ref: reference of tenant
:return: application persistent profile
"""
profile = None
persistenceType = vs.get('persistenceType', '')
if persistenceType == 'COOKIEINSERT':
timeout = vs.get('timeout', 2)
profile = {
"http_cookie_persistence_profile": {
"always_send_cookie": False
},
"persistence_type": "PERSISTENCE_TYPE_HTTP_COOKIE",
"server_hm_down_recovery": "HM_DOWN_PICK_NEW_SERVER",
"name": name,
}
            # Add timeout only if greater than zero
if int(timeout) > 0:
profile['http_cookie_persistence_profile']["timeout"] = timeout
elif persistenceType == 'SOURCEIP':
            # Default timeout to 120 seconds if not provided, then convert to minutes.
timeout = vs.get('timeout', 120)
timeout = int(timeout) / 60
if timeout < 1:
timeout = 1
profile = {
"server_hm_down_recovery": "HM_DOWN_PICK_NEW_SERVER",
"persistence_type": "PERSISTENCE_TYPE_CLIENT_IP_ADDRESS",
"ip_persistence_profile": {
"ip_persistent_timeout": timeout
},
"name": name
}
elif persistenceType == 'SSLSESSION':
profile = {
"server_hm_down_recovery": "HM_DOWN_PICK_NEW_SERVER",
"persistence_type": "PERSISTENCE_TYPE_TLS",
"name": name
}
profile['tenant_ref'] = tenant_ref
return profile
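    # Worked example for the SOURCEIP branch (hypothetical values): a
    # NetScaler timeout of 300 seconds becomes 300 / 60 = 5 minutes for
    # ip_persistent_timeout; the 120-second default becomes 2 minutes,
    # and anything under 60 seconds is clamped up to 1 minute.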
    def update_status_target_lb_vs_to_indirect(self, target_lb_vs):
        """
        Updates the status of the target lb vserver to Indirect
        :param target_lb_vs: name of target lb vserver
        :return: None
        """
        global csv_writer_dict_list
        row = [row for row in csv_writer_dict_list
               if row['Object Name'] == target_lb_vs
               and row['Netscaler Command'] == 'add lb vserver']
if row:
row[0]['Status'] = STATUS_INDIRECT
def create_http_policy_set_for_redirect_url(self, vs_obj, redirect_uri,
avi_config, tenant_name, tenant_ref, enable_ssl):
"""
        Creates an http policy set for the redirect url
:param vs_obj: object of VS
:param redirect_uri: redirect uri
:param avi_config: dict of AVi
:param tenant_name: name of tenant
:param tenant_ref: tenant ref
:param enable_ssl: flag for enabling ssl
:return: None
"""
redirect_uri = str(redirect_uri).replace('"', '')
action = self.build_redirect_action_dict(redirect_uri, enable_ssl)
policy_obj = {
'name': vs_obj['name'] + '-redirect-policy',
'tenant_ref': tenant_ref,
'http_request_policy': {
'rules': [
{
'index': 0,
'name': vs_obj['name'] + '-redirect-policy-rule-0',
'match': {
'path': {
'match_case': 'INSENSITIVE',
'match_str': [
'/'
],
'match_criteria': 'EQUALS'
}
},
'redirect_action': action
}
]
}
}
updated_http_policy_ref = self.get_object_ref(policy_obj['name'],
OBJECT_TYPE_HTTP_POLICY_SET,
tenant_name)
http_policies = {
'index': 11,
'http_policy_set_ref': updated_http_policy_ref
}
if not vs_obj.get('http_policies'):
vs_obj['http_policies'] = []
else:
ind = max([policies['index'] for policies in vs_obj[
'http_policies']])
http_policies['index'] = ind + 1
vs_obj['http_policies'].append(http_policies)
avi_config['HTTPPolicySet'].append(policy_obj)
def clean_virtual_service_from_avi_config(self, avi_config,
controller_version):
"""
        Cleans up any vs which has vip 0.0.0.0
:param avi_config: dict of AVI
:param controller_version:
:return: None
"""
vs_list = copy.deepcopy(avi_config['VirtualService'])
avi_config['VirtualService'] = []
if parse_version(controller_version) >= parse_version('17.1'):
avi_config['VirtualService'] = \
[vs for vs in vs_list
if vs['vsvip_ref'].split('name=')[1].split('-')[0] != '0.0.0.0']
else:
avi_config['VirtualService'] = \
[vs for vs in vs_list
if vs['ip_address']['addr'] != '0.0.0.0']
def parse_url(self, url):
"""
This method returns the parsed url
:param url: url that need to be parsed
:return:
"""
parsed = urlparse(url)
return parsed
def format_string_to_json(self, avi_string):
"""
        Converts an avi object string into a dict via literal evaluation
:param avi_string: string to be converted
:return: Return converted string
"""
avi_string = avi_string.split('__/__')[0]
return ast.literal_eval(avi_string)
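    # Sketch (hypothetical input): merged status cells join objects with
    # '__/__', so for
    #   avi_string = "{'name': 'pool-1'}__/__{'name': 'pool-2'}"
    # only the first object is kept and ast.literal_eval() turns it into
    # the dict {'name': 'pool-1'}.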
def get_csv_object_list(self, csv_writer_dict_list, command_list):
"""
This method is used for getting csv object
:param csv_writer_dict_list: CSV row of object from xlsx report
:param command_list: List of netscaler commands
:return: List of CSV rows
"""
csv_object = [row for row in
csv_writer_dict_list
if row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]
and row['Netscaler Command'] in
command_list]
return csv_object
def get_csv_skipped_list(self, csv_object, name_of_object, vs_ref):
"""
This method is used for getting skipped list from vs.
:param csv_object: CSV row of object from xlsx report
:param name_of_object: Name of Object
:param vs_ref: Reference of VS
:return: List of skipped settings
"""
skipped_list = []
for each_partial in csv_object:
avi_object_json = \
self.format_string_to_json(each_partial['AVI Object'])
if avi_object_json.get('name') and \
avi_object_json['name'] == name_of_object:
# Set the VS reference for Netscaler status row
each_partial['VS Reference'] = vs_ref
repls = ('[', ''), (']', '')
skipped_setting_csv = reduce(lambda a, kv: a.replace(*kv),
repls,
each_partial['Skipped settings'])
if skipped_setting_csv:
skipped_list.append(skipped_setting_csv)
return skipped_list
def get_ssl_key_and_cert_refs_skipped(self, csv_writer_dict_list,
object_name, vs_ref):
"""
        Gets the skipped settings list from the CSV rows
:param csv_writer_dict_list: CSV row of object from xlsx report
:param object_name: like virtual service or pool name
:param vs_ref: Reference of VS
:return: List of skipped settings
"""
ssl_key_cert = \
self.get_name(object_name['ssl_key_and_certificate_refs'][0])
csv_object = self.get_csv_object_list(
csv_writer_dict_list, ['bind ssl vserver', 'bind ssl service',
'bind ssl serviceGroup'])
skipped_list = self.get_csv_skipped_list(csv_object, ssl_key_cert,
vs_ref)
return ssl_key_cert, skipped_list
def get_ssl_profile_skipped(self, csv_writer_dict_list, ssl_profile_ref,
vs_ref):
"""
        Gets the skipped settings list from the CSV rows
:param csv_writer_dict_list: CSV row of object from xlsx report
:param ssl_profile_ref: reference of ssl profile object
:param vs_ref: virtual service obj reference.
:return: List of skipped settings
"""
ssl_profile_name = self.get_name(ssl_profile_ref)
csv_object = \
self.get_csv_object_list(csv_writer_dict_list,
['set ssl vserver', 'set ssl service',
'set ssl serviceGroup'])
skipped_list = self.get_csv_skipped_list(csv_object, ssl_profile_name,
vs_ref)
return ssl_profile_name, skipped_list
def get_application_profile_skipped(self, csv_writer_dict_list,
name_of_object, vs_ref):
"""
        Gets the skipped settings list from the CSV rows
:param csv_writer_dict_list: CSV row of object from xlsx report
:param name_of_object: object name like pool name, etc
:param vs_ref: virtual service obj reference.
:return: List of skipped settings
"""
ssl_profile_name = self.get_name(
name_of_object['application_profile_ref'])
csv_object = self.get_csv_object_list(
csv_writer_dict_list, ['add ns httpProfile'])
skipped_list = self.get_csv_skipped_list(csv_object, ssl_profile_name,
vs_ref)
return ssl_profile_name, skipped_list
def get_network_profile_skipped(self, csv_writer_dict_list, name_of_object,
vs_ref):
"""
        Gets the skipped settings list from the CSV rows
:param csv_writer_dict_list:List of add ns tcpProfile netscaler command rows
:param name_of_object: object name like pool name, etc
:param vs_ref: virtual service obj reference.
:return: List of skipped settings
"""
ssl_profile_name = self.get_name(name_of_object['network_profile_ref'])
csv_object = self.get_csv_object_list(
csv_writer_dict_list, ['add ns tcpProfile'])
skipped_list = self.get_csv_skipped_list(csv_object, ssl_profile_name,
vs_ref)
return ssl_profile_name, skipped_list
def get_app_persistence_profile_skipped(self, csv_writer_dict_list,
name_of_object, vs_ref):
"""
        Gets the skipped settings list from the CSV rows
:param csv_writer_dict_list: List of set lb group netscaler command rows
:param name_of_object: object name like pool name, etc
:param vs_ref: virtual service obj reference.
:return: List of skipped settings
"""
# Changed ssl profile name to ssl profile ref.
app_persistence_profile_name = self.get_name(
name_of_object['ssl_profile_ref'])
csv_object = self.get_csv_object_list(csv_writer_dict_list, ['set lb group'])
skipped_list = self.get_csv_skipped_list(
csv_object, app_persistence_profile_name, vs_ref)
return app_persistence_profile_name, skipped_list
def get_pool_skipped_list(self, avi_config, pool_group_name,
skipped_setting, csv_object, obj_name,
csv_writer_dict_list, vs_ref):
"""
This method is used for getting pool skipped list.
:param avi_config: AVI dict
:param pool_group_name: Name of Pool group
:param skipped_setting: List of skipped settings
:param csv_object: CSV row
:param obj_name: Name of Object
:param csv_writer_dict_list: List of bind lb vserver netscaler command
rows
:param vs_ref: vs object reference
:return: List of skipped settings
"""
pool_group_object_ref = [pool_group_object_ref for pool_group_object_ref
in avi_config['PoolGroup'] if
pool_group_object_ref[
'name'] == pool_group_name]
for pool_group in pool_group_object_ref:
if 'members' in pool_group:
for each_pool_ref in pool_group['members']:
pool_name = self.get_name(each_pool_ref['pool_ref'])
skipped_list = self.get_csv_skipped_list(csv_object, pool_name,
vs_ref)
if len(skipped_list) > 0:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[obj_name]['pool']['pool_skipped_list'] \
= skipped_list
for pool_partial in csv_object:
avi_object_json = self.format_string_to_json(
pool_partial['AVI Object'])
if avi_object_json['name'] == pool_name:
if 'health_monitor_refs' in avi_object_json and \
avi_object_json['health_monitor_refs']:
monitor_refs = \
avi_object_json['health_monitor_refs']
for monitor_ref in monitor_refs:
monitor_ref = self.get_name(monitor_ref)
csv_object = self.get_csv_object_list(
csv_writer_dict_list,
['add lb monitor'])
skipped_list = self.get_csv_skipped_list(
csv_object, monitor_ref, vs_ref)
if skipped_list:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[obj_name]['pool'][
'health monitor'] = {}
skipped_setting[obj_name]['pool'][
'health monitor'][
'name'] = monitor_ref
skipped_setting[obj_name]['pool'][
'health monitor']['skipped_list'] =\
skipped_list
if 'ssl_key_and_certificate_refs' in avi_object_json:
name, skipped = \
self.get_ssl_key_and_cert_refs_skipped(
csv_writer_dict_list, avi_object_json,
vs_ref)
if skipped:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[
obj_name]['pool'][
'ssl key and cert'] = {}
skipped_setting[
obj_name]['pool']['ssl key and cert'][
'name'] = name
skipped_setting[
obj_name]['pool']['ssl key and cert'][
'skipped_list'] = skipped
if 'ssl_profile_ref' in avi_object_json:
name, skipped = \
self.get_ssl_profile_skipped(
csv_writer_dict_list, avi_object_json[
'ssl_profile_ref'], vs_ref)
if skipped:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[obj_name]['pool'][
'ssl profile'] = {}
skipped_setting[obj_name]['pool'][
'ssl profile']['name'] = name
skipped_setting[obj_name]['pool'][
'ssl profile']['skipped_list'] = skipped
# Get the skipped settings of application
# persistence profile ref.
if 'application_persistence_profile_ref' in \
avi_object_json:
name, skipped = \
self.get_app_persistence_profile_skipped(
csv_writer_dict_list, avi_object_json,
vs_ref)
if skipped:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[obj_name]['pool'][
'Application Persistence profile'] = {}
skipped_setting[obj_name]['pool'][
'Application Persistence profile'][
'name'] = name
skipped_setting[obj_name]['pool'][
'Application Persistence profile'][
'skipped_list'] = skipped
def vs_complexity_level(self):
"""
        This method calculates the complexity of each vs.
:return:
"""
vs_csv_objects = [row for row in csv_writer_dict_list
if
row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]
and row['Netscaler Command'] in [
'add cs vserver', 'add lb vserver']]
for vs_csv_object in vs_csv_objects:
virtual_service = self.format_string_to_json(
vs_csv_object['AVI Object'])
# Update the complexity level of VS as Basic or Advanced
self.update_vs_complexity_level(vs_csv_object, virtual_service)
def vs_per_skipped_setting_for_references(self, avi_config):
"""
        Adds the skipped settings per VS to its CSV row
        :param avi_config: avi config used for checking vs skipped settings
:return: None
"""
        # Get the count of vs successfully migrated
global fully_migrated
global total_count
global progressbar_count
fully_migrated = 0
# Get the VS object list which is having status successful and partial.
vs_csv_objects = [row for row in csv_writer_dict_list
if
row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]
and row['Netscaler Command'] in [
'add cs vserver', 'add lb vserver']]
# calculate total count
total_count = total_count + len(vs_csv_objects)
for vs_csv_object in vs_csv_objects:
progressbar_count += 1
skipped_setting = {}
virtual_service = self.format_string_to_json(
vs_csv_object['AVI Object'])
# Update the complexity level of VS as Basic or Advanced
self.update_vs_complexity_level(vs_csv_object, virtual_service)
vs_ref = virtual_service['name']
repls = ('[', ''), (']', '')
# Get list of skipped setting attributes
skipped_setting_csv = reduce(lambda a, kv: a.replace(*kv), repls,
vs_csv_object['Skipped settings'])
if skipped_setting_csv:
skipped_setting['virtual_service'] = [skipped_setting_csv]
# Get the skipped list for ssl key and cert
if 'ssl_key_and_certificate_refs' in virtual_service:
name, skipped = self.get_ssl_key_and_cert_refs_skipped(
csv_writer_dict_list, virtual_service, vs_ref)
if skipped:
skipped_setting['ssl key and cert'] = {}
skipped_setting['ssl key and cert']['name'] = name
skipped_setting['ssl key and cert'][
'skipped_list'] = skipped
# Get the skipped list for ssl profile name.
# Changed ssl profile name to ssl profile ref.
if 'ssl_profile_ref' in virtual_service:
name, skipped = self.get_ssl_profile_skipped(
csv_writer_dict_list, virtual_service['ssl_profile_ref'],
vs_ref)
if skipped:
skipped_setting['ssl profile'] = {}
skipped_setting['ssl profile']['name'] = name
skipped_setting['ssl profile']['skipped_list'] = skipped
# Get the skipped list for pool group.
if 'pool_group_ref' in virtual_service:
pool_group_name = self.get_name(
virtual_service['pool_group_ref'])
csv_object = self.get_csv_object_list(
csv_writer_dict_list, ['bind lb vserver'])
self.get_pool_skipped_list(
avi_config, pool_group_name, skipped_setting, csv_object,
'pool group', csv_writer_dict_list, vs_ref)
            # Get the skipped list for http policy.
if 'http_policies' in virtual_service:
csv_object = self.get_csv_object_list(
csv_writer_dict_list,
['add cs policy', 'add responder policy',
'add rewrite policy'])
for http_ref in virtual_service['http_policies']:
http_name = self.get_name(http_ref['http_policy_set_ref'])
skipped_list = self.get_csv_skipped_list(csv_object,
http_name,
vs_ref)
if skipped_list:
skipped_setting['Httppolicy'] = {}
skipped_setting['Httppolicy']['name'] = http_name
skipped_setting['Httppolicy'][
'skipped_list'] = skipped_list
# Get the http policy name
for each_http_policy in avi_config['HTTPPolicySet']:
if each_http_policy['name'] == http_name:
for http_req in \
each_http_policy['http_request_policy'][
'rules']:
if http_req.get('switching_action', None) and \
http_req['switching_action'].get(
'pool_group_ref', None):
pool_group_name = self.get_name(
http_req['switching_action']
['pool_group_ref'])
self.get_pool_skipped_list(
avi_config, pool_group_name,
skipped_setting, csv_object,
'Httppolicy',
csv_writer_dict_list, vs_ref)
# Get the skipped list for application_profile_ref.
if 'application_profile_ref' in virtual_service and \
'admin:System' not in \
virtual_service['application_profile_ref']:
name, skipped = self.get_application_profile_skipped(
csv_writer_dict_list, virtual_service, vs_ref)
if skipped:
skipped_setting['Application profile'] = {}
skipped_setting['Application profile'][
'name'] = name
skipped_setting['Application profile'][
'skipped_list'] = skipped
# Get the skipped list for network profile ref.
if 'network_profile_ref' in virtual_service and \
'admin:System' not in \
virtual_service['network_profile_ref']:
name, skipped = self.get_network_profile_skipped(
csv_writer_dict_list, virtual_service, vs_ref)
if skipped:
skipped_setting['Network profile'] = {}
skipped_setting['Network profile'][
'name'] = name
skipped_setting['Network profile'][
'skipped_list'] = skipped
# Update overall skipped setting of VS csv row
if skipped_setting:
vs_csv_object.update(
{'Overall skipped settings': str(skipped_setting)})
else:
vs_csv_object.update(
{'Overall skipped settings': "FULLY MIGRATION"})
fully_migrated += 1
msg = "Writing excel sheet started..."
self.print_progress_bar(progressbar_count, total_count, msg,
prefix='Progress', suffix='')
csv_objects = [row for row in csv_writer_dict_list
if row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]
and row['Netscaler Command'] not in ['add cs vserver',
'add lb vserver']
and (
'VS Reference' not in row or not row[
'VS Reference'])]
# Update the vs reference not in used if objects are not attached to
# VS directly or indirectly
for csv_object in csv_objects:
csv_object['VS Reference'] = STATUS_NOT_IN_USE
def write_status_report_and_pivot_table_in_xlsx(self, row_list, output_dir,
report_name, vs_level_status):
"""
        This method writes the status and makes a pivot table in the excel sheet
:param row_list:
:param output_dir:
:param report_name:
:param vs_level_status:
:return:
"""
global total_count
global progressbar_count
# List of fieldnames for headers
if vs_level_status:
fieldnames = ['Line Number', 'Netscaler Command', 'Object Name',
'Full Command', 'Status', 'Skipped settings',
'Indirect mapping', 'Not Applicable', 'User Ignored',
'Overall skipped settings', 'Complexity Level',
'VS Reference', 'AVI Object']
else:
fieldnames = ['Line Number', 'Netscaler Command', 'Object Name',
'Full Command', 'Status', 'Skipped settings',
'Indirect mapping', 'Not Applicable', 'User Ignored',
'Complexity Level' , 'AVI Object']
xlsx_report = output_dir + os.path.sep + ("%s-ConversionStatus.xlsx" %
report_name)
# xlsx workbook
status_wb = Workbook(xlsx_report)
# xlsx worksheet
status_ws = status_wb.add_worksheet("Status Sheet")
# Lock the first row of xls report.
status_ws.freeze_panes(1, 0)
first_row = 0
for header in fieldnames:
col = fieldnames.index(header)
status_ws.write(first_row, col, header)
row = 1
for row_data in row_list:
progressbar_count += 1
for _key, _value in row_data.items():
if _key in fieldnames:
col = fieldnames.index(_key)
status_ws.write(row, col, _value)
msg = "Writing excel sheet started..."
self.print_progress_bar(progressbar_count, total_count, msg,
prefix='Progress', suffix='')
row += 1
status_wb.close()
# create dataframe for row list
df = pandas.DataFrame(row_list, columns=fieldnames)
# create pivot table using pandas
pivot_table = pandas.pivot_table(df,
index=["Status", "Netscaler Command"],
values=[], aggfunc=[len], fill_value=0)
# create dataframe for pivot table using pandas
pivot_df = pandas.DataFrame(pivot_table)
master_book = load_workbook(xlsx_report)
master_writer = pandas.ExcelWriter(xlsx_report, engine='openpyxl')
master_writer.book = master_book
# Add pivot table in Pivot sheet
pivot_df.to_excel(master_writer, 'Pivot Sheet')
master_writer.save()
def update_skip_duplicates(self, obj, obj_list, obj_type,
merge_object_mapping, name, ent_type, prefix,
syslist):
"""
        This method merges duplicate objects
        :param obj: Source object to find duplicates for
        :param obj_list: List of objects to search duplicates in
        :param obj_type: Type of object
        :param merge_object_mapping: mapping of original to merged object names
        :param name: Name of the object
        :param ent_type: entity type of the object
        :param prefix: prefix for objects
        :param syslist: system/baseline object list
        :return: True if the object was merged into a duplicate, else False
"""
dup_of = None
merge_object_mapping[obj_type].update({name: name})
dup_of, old_name = self.check_for_duplicates(obj, obj_list, obj_type,
merge_object_mapping, ent_type,
prefix,
syslist)
if dup_of:
LOG.info(
"Duplicate profiles: %s merged in %s" % (obj['name'], dup_of))
# Update value of ssl profile with merged profile
if old_name in merge_object_mapping[obj_type].keys():
merge_object_mapping[obj_type].update({old_name: dup_of})
merge_object_mapping[obj_type].update({name: dup_of})
return True
return False
def create_update_vsvip(self, vip, vsvip_config, tenant_ref, cloud_ref,
prefix=None, vrf_ref=None):
"""
        Creates or updates a VSVIP object.
:param vip: vip of VS
:param vsvip_config: List of vs object
:param tenant_ref: tenant reference
:param cloud_ref: cloud reference
:param prefix: prefix for objects
:param vrf_ref: VRF ref to be added in VIP object
:return: None
"""
        # Get the existing vsvip object list if present
name = vip + '-vsvip'
# Added prefix for objects
if prefix:
name = prefix + '-' + name
vsvip = [vip_obj for vip_obj in vsvip_config
if vip_obj['name'] == name]
if vsvip:
diff_ten = [vips for vips in vsvip if vips['tenant_ref'] !=
tenant_ref]
if diff_ten:
LOG.debug('VsVip %s is repeated with vrf %s but different '
'tenant %s', name, self.get_name(vrf_ref) if vrf_ref
else 'None', self.get_name(tenant_ref))
name = ''
# If VSVIP object not present then create new VSVIP object.
else:
vsvip_object = {
"name": name,
"tenant_ref": tenant_ref,
"cloud_ref": cloud_ref,
"vip": [
{
"vip_id": "0",
"ip_address": {
"type": "V4",
"addr": vip
}
}
],
}
if vrf_ref:
vsvip_object["vrf_context_ref"] = vrf_ref
vsvip_config.append(vsvip_object)
def get_redirect_fail_action(self, url):
"""
This method returns the fail action dict
:param url: url
:return:
"""
parsed = urlparse(url)
redirect_fail_action = {
'fail_action': {
'redirect': {
'host': parsed.hostname,
'protocol': str(parsed.scheme).upper(),
'status_code': "HTTP_REDIRECT_STATUS_CODE_302"
},
"type": "FAIL_ACTION_HTTP_REDIRECT"
}
}
if parsed.path:
redirect_fail_action['fail_action']['redirect']['path'] = \
str(parsed.path).replace('"', '')
if parsed.query:
redirect_fail_action['fail_action']['redirect'][
'query'] = parsed.query
return redirect_fail_action
def cleanup_dupof(self, avi_config):
"""
This method is used to clean up dup_of key from different AVI objects
:param avi_config:
:return:
"""
self.remove_dup_key(avi_config["ApplicationProfile"])
self.remove_dup_key(avi_config["NetworkProfile"])
self.remove_dup_key(avi_config["SSLProfile"])
self.remove_dup_key(avi_config['PKIProfile'])
self.remove_dup_key(avi_config["ApplicationPersistenceProfile"])
self.remove_dup_key(avi_config['HealthMonitor'])
def update_profile_ref(self, ref, avi_obj, merge_obj_list):
"""
This method is used to update the profile references which was
attached at the time of creation
:param ref:
:param avi_obj:
:param merge_obj_list:
:return:
"""
for obj in avi_obj:
obj_ref = obj.get(ref)
tenant_ref = obj.get('tenant_ref')
if obj_ref:
name = self.get_name(obj_ref)
tenant = self.get_name(tenant_ref)
if name in merge_obj_list:
updated_name = merge_obj_list[name]
if ref == 'application_persistence_profile_ref':
type_cons = OBJECT_TYPE_APPLICATION_PERSISTENCE_PROFILE
if ref == 'application_profile_ref':
type_cons = OBJECT_TYPE_APPLICATION_PROFILE
obj[ref] = self.get_object_ref(updated_name, type_cons,
tenant)
def vs_redirect_http_to_https(self, avi_config, sysdict):
"""
        Removes a VS which redirects to another VS, and updates the
        status and AVI object for that VS
:param avi_config: avi configuration after all conversion
:param sysdict: system configuration
:return:
"""
vsrem = {}
LOG.debug("Check started for redirect from HTTP VS to HTTPS VS with "
"no pool")
for vs in avi_config['VirtualService']:
if not vs.get('pool_group_ref') and not vs.get(
'application_profile_ref') and vs.get('services', []) and \
                    not all([s.get('enable_ssl', True) for s in vs['services']])\
and vs.get('http_policies',[]) and vs['http_policies'][
0].get('http_policy_set_ref'):
polname = self.get_name(vs['http_policies'][0][
'http_policy_set_ref'])
pol = [pl for pl in avi_config['HTTPPolicySet'] if pl['name']
== polname]
if pol and pol[0].get('http_request_policy', {}).get('rules',
[]) and pol[0]['http_request_policy']['rules'][0].get(
'redirect_action'):
iplist = [ip['ip_address']['addr'] for ip in vs.get('vip',
[]) if ip.get('ip_address',{}).get('addr')] or (
[vs['ip_address']['addr']] if vs.get(
'ip_address',{}).get('addr') else [])
if iplist:
for nvs in avi_config['VirtualService']:
if vs['name'] != nvs['name'] and [ip for ip in
iplist if ip in ([nip['ip_address']['addr']
for nip in nvs.get('vip', []) if nip.get(
'ip_address',{}).get('addr')] or [nvs[
'ip_address']['addr'] if nvs.get(
'ip_address',{}).get('addr') else []])]:
appname = self.get_name(nvs[
'application_profile_ref']) if \
nvs.get('application_profile_ref') \
else None
if appname == 'ns-migrate-http':
LOG.debug("%s has redirect to %s, hence "
"removing %s" % (vs['name'],
nvs['name'], vs['name']))
vsrem[vs['name']] = nvs['name']
appprof = [pr for pr in (avi_config[
'ApplicationProfile'] + sysdict[
'ApplicationProfile']) if pr['name']
== appname]
if appprof and appprof[0]['type'] == \
'APPLICATION_PROFILE_TYPE_HTTP':
if appprof[0].get('http_profile'):
appprof[0]['http_profile'][
'http_to_https'] = True
else:
appprof[0]['http_profile'] = {
'http_to_https': True}
LOG.debug("%s has redirect to %s, hence "
"setting 'http_to_https' as true "
"and removing %s" %(vs['name'],
nvs['name'], vs['name']))
vsrem[vs['name']] = nvs['name']
# Condition to merge http ports to https vs
if [True for ssl in nvs['services'] if ssl[
'enable_ssl']] and \
[True for ssl_vs in vs['services']
if not ssl_vs['enable_ssl']]:
nvs['services'].append(vs['services'][0])
vsrem[vs['name']] = nvs['name']
LOG.debug("Check completed for redirect from HTTP VS to HTTPS VS with "
"no pool")
if vsrem:
avi_config['VirtualService'] = [v for v in avi_config[
'VirtualService'] if v['name'] not
in vsrem.keys()]
LOG.debug('%s VS got removed from AVI configuration' % str(len(
vsrem)))
for cl in csv_writer_dict_list:
if cl['Object Name'] in vsrem.keys() and cl[
'Netscaler Command'] in ['add lb vserver', 'add cs vserver']:
cl['Status'] = STATUS_INDIRECT
cl['AVI Object'] = 'Redirected to %s' % vsrem[cl[
'Object Name']]
def merge_pool(self, avi_config):
"""
This method merge the pools in AVI if HM is same
:param avi_config:
:return:
"""
        mergelist = []
        for poolgrp in avi_config['PoolGroup']:
# do not merge the pool if it is a backup pool in the group
pool_member = [obj for obj in poolgrp['members'] if not
obj.get('priority_label', '10') == '2']
length = len(pool_member)
for count in range(length):
pool_name = pool_member[count]['pool_ref'].split(
'&')[1].split('=')[1]
if pool_name in mergelist:
continue
pool = [pl for pl in avi_config['Pool']
if pl['name'] == pool_name]
if not pool:
LOG.debug("'%s' not present" % pool_name)
continue
for count2 in range(count+1, length):
pname = pool_member[count2]['pool_ref'].split(
'&')[1].split('=')[1]
nextpool = [pol for pol in avi_config['Pool']
if pol['name'] == pname]
if not nextpool:
LOG.debug("'%s' not present" % pname)
continue
                    # Note: list.sort() returns None, so the original
                    # comparison was always True; compare sorted copies.
                    if sorted(pool[0]['health_monitor_refs']) == sorted(
                            nextpool[0]['health_monitor_refs']):
LOG.debug("Merging pool '%s' in '%s'" % (nextpool[0][
'name'], pool[0]['name']))
ip_port = set()
for ser in pool[0]['servers']:
ip_port.add(str(ser['ip']['addr']) + ':' + str(
ser['port']))
for server in nextpool[0]['servers']:
ipport = str(server['ip']['addr']) + ':' + str(
server['port'])
if ipport not in list(ip_port):
pool[0]['servers'].append(server)
for cl in csv_writer_dict_list:
if cl['Object Name'] == (nextpool[0][
'name'].replace('-pool','')) and cl[
'Netscaler Command'] in ['add service',
'add serviceGroup']:
cl['AVI Object'] = 'Merged to %s' % pool[0][
'name']
mergelist.append(nextpool[0]['name'])
for plg in avi_config['PoolGroup']:
plg['members'] = [member for member in plg['members'] if
member['pool_ref'].split('&')[1].split('=')[1] not
in mergelist]
avi_config['Pool'] = [pools for pools in avi_config['Pool'] if pools[
'name'] not in mergelist]
def add_policy(self, policy, updated_vs_name, avi_config, tmp_policy_ref,
vs_obj, tenant_name, cloud_name, prefix, used_poolgrp_ref):
"""
This method is used to add policy objects to AVI and also add
reference in VS
:param policy: policy object
:param updated_vs_name: vs name
:param avi_config: avi config dict
:param tmp_policy_ref: list of policy ref which are already used
:param vs_obj: vs object
:param tenant_name: name of tenant
:param cloud_name: name of cloud
:param prefix: prefix
:param used_poolgrp_ref: list of used pool group ref
:return:
"""
if policy['name'] in tmp_policy_ref:
# clone the http policy set if it is referenced to other VS
policy = self.clone_http_policy_set(policy, updated_vs_name,
avi_config, tenant_name, cloud_name, used_poolgrp_ref,
userprefix=prefix)
updated_http_policy_ref = self.get_object_ref(policy['name'],
OBJECT_TYPE_HTTP_POLICY_SET, tenant_name)
tmp_policy_ref.append(policy['name'])
http_policies = {
'index': 11,
'http_policy_set_ref': updated_http_policy_ref
}
if not vs_obj.get('http_policies'):
vs_obj['http_policies'] = []
else:
ind = max([policies['index'] for policies in vs_obj[
'http_policies']])
http_policies['index'] = ind + 1
vs_obj['http_policies'].append(http_policies)
avi_config['HTTPPolicySet'].append(policy)
def build_redirect_action_dict(self, redirect_url, enable_ssl):
"""
This method returns a redirect action dict
:param redirect_url: redirect url
:param enable_ssl: flag for ssl enable
:return:
"""
redirect_url = self.parse_url(redirect_url)
protocol = str(redirect_url.scheme).upper()
hostname = str(redirect_url.hostname)
pathstring = str(redirect_url.path)
querystring = str(redirect_url.query)
full_path = '%s?%s' % (pathstring, querystring) if pathstring and \
querystring else pathstring
        # Keep the parsed scheme when present; otherwise fall back to HTTPS or
        # HTTP based on enable_ssl. (The original and/or chain yielded 'HTTPS'
        # whenever enable_ssl was true, even when the URL had a scheme.)
        if not protocol:
            protocol = 'HTTPS' if enable_ssl else 'HTTP'
action = {
'protocol': protocol
}
if hostname:
action.update({'host':
{
'type': 'URI_PARAM_TYPE_TOKENIZED',
'tokens': [{
'type': 'URI_TOKEN_TYPE_STRING',
'str_value': hostname,
'start_index': '0',
'end_index': '65535'
}]
}
})
if full_path:
action.update({'path':
{
'type': 'URI_PARAM_TYPE_TOKENIZED',
'tokens': [{
'type': 'URI_TOKEN_TYPE_STRING',
'str_value': full_path,
'start_index': '0',
'end_index': '65535'
}]
}
})
return action
def create_http_to_https_custom_profile(self):
        """
        This method returns a custom HTTP application profile dict used for
        HTTP-to-HTTPS redirect handling
        :return: custom application profile dict
        """
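        # Editor's note (not part of the original module): the returned
        # profile is consumed by name; a caller would typically append it,
        #   avi_config['ApplicationProfile'].append(
        #       self.create_http_to_https_custom_profile())
        # and 'ns-migrate-http' is the marker that vs_redirect_http_to_https
        # above looks for when collapsing HTTP redirect VSes.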
return {
'name': "ns-migrate-http",
'type': "APPLICATION_PROFILE_TYPE_HTTP",
'tenant_ref': "/api/tenant/?name=admin",
'preserve_client_ip': False,
'http_profile': {
'max_rps_uri': 0,
'keepalive_header': False,
'max_rps_cip_uri': 0,
'x_forwarded_proto_enabled': False,
'connection_multiplexing_enabled': True,
'websockets_enabled': True,
'enable_request_body_buffering': False,
'hsts_enabled': False,
'xff_enabled': True,
'disable_keepalive_posts_msie6': True,
'keepalive_timeout': 30000,
'ssl_client_certificate_mode': "SSL_CLIENT_CERTIFICATE_NONE",
'http_to_https': True,
'max_bad_rps_cip_uri': 0,
'client_body_timeout': 30000,
'httponly_enabled': False,
'hsts_max_age': 365,
'max_bad_rps_cip': 0,
'server_side_redirect_to_https': False,
'client_max_header_size': 12,
'client_max_request_size': 48,
'max_rps_unknown_uri': 0,
'post_accept_timeout': 30000,
'client_header_timeout': 10000,
'secure_cookie_enabled': False,
'xff_alternate_name': "X-Forwarded-For",
'max_rps_cip': 0,
'client_max_body_size': 0,
'max_rps_unknown_cip': 0,
'allow_dots_in_header_name': False,
'max_bad_rps_uri': 0,
'use_app_keepalive_timeout': False
},
'dos_rl_profile': {
'rl_profile': {
'client_ip_connections_rate_limit': {
'explicit_tracking': False,
'action': {
'status_code': "HTTP_LOCAL_RESPONSE_STATUS_CODE_429",
'type': "RL_ACTION_NONE"
},
'fine_grain': False
}
},
'dos_profile': {
'thresh_period': 5
}
}
}
def correct_vs_ref(self, avi_config):
"""
This method corrects the reference of VS to different objects
:param avi_config: avi configuration dict
:return:
"""
global csv_writer_dict_list
avi_graph = self.make_graph(avi_config)
csv_dict_sub = [row for row in csv_writer_dict_list if row[
'Netscaler Command'] not in ('add lb vserver',
'add cs vserver') and row[
'Status'] in (STATUS_PARTIAL,
STATUS_SUCCESSFUL)]
for dict_row in csv_dict_sub:
obj = dict_row['AVI Object']
if isinstance(obj, str) and obj.startswith('{'):
vs = []
if '__/__' in obj:
for dataobj in obj.split('__/__'):
                        # ast.literal_eval is safer than eval for dict reprs
                        obj = ast.literal_eval(dataobj)
self.add_vs_ref(obj, avi_graph, vs)
else:
                    obj = ast.literal_eval(obj)
self.add_vs_ref(obj, avi_graph, vs)
if vs:
dict_row['VS Reference'] = str(list(set(vs)))
else:
dict_row['VS Reference'] = STATUS_NOT_IN_USE
def add_vs_ref(self, obj, avi_graph, vs):
"""
Helper method for adding vs ref
:param obj: object
:param avi_graph: avi graph
:param vs: VS list
:return:
"""
obj_name = obj.get('name', obj.get('hostname'))
if obj_name:
if avi_graph.has_node(obj_name):
LOG.debug("Checked predecessor for %s", obj_name)
predecessor = list(avi_graph.predecessors(obj_name))
if predecessor:
self.get_predecessor(predecessor, avi_graph, vs)
else:
LOG.debug("Object %s may be merged or orphaned", obj_name)
def get_predecessor(self, predecessor, avi_graph, vs):
"""
This method gets the predecessor of the object
:param predecessor: predecessor list
:param avi_graph: avi graph
:param vs: VS list
:return:
"""
if len(predecessor) > 1:
for node in predecessor:
nodelist = [node]
self.get_predecessor(nodelist, avi_graph, vs)
elif len(predecessor):
node_obj = [nod for nod in list(avi_graph.nodes().data()) if
nod[0] == predecessor[0]]
if node_obj and (node_obj[0][1]['type'] == 'VS' or 'VS' in node_obj[
0][1]['type']):
LOG.debug("Predecessor %s found", predecessor[0])
vs.extend(predecessor)
else:
LOG.debug("Checked predecessor for %s", predecessor[0])
nodelist = list(avi_graph.predecessors(predecessor[0]))
self.get_predecessor(nodelist, avi_graph, vs)
else:
LOG.debug("No more predecessor")
| 45.734504
| 85
| 0.509453
|
import csv
import logging
import os
import copy
import re
import random
from functools import reduce
import ast
import pandas
import pexpect
import avi.migrationtools.netscaler_converter.ns_constants as ns_constants
from pkg_resources import parse_version
from xlsxwriter import Workbook
from openpyxl import load_workbook
from urllib.parse import urlparse
from OpenSSL import crypto
from socket import gethostname
from avi.migrationtools.netscaler_converter.ns_constants \
import (STATUS_SKIPPED, STATUS_SUCCESSFUL, STATUS_INDIRECT,
STATUS_NOT_APPLICABLE, STATUS_PARTIAL, STATUS_DATASCRIPT,
STATUS_INCOMPLETE_CONFIGURATION, STATUS_COMMAND_NOT_SUPPORTED,
OBJECT_TYPE_POOL_GROUP, OBJECT_TYPE_POOL, STATUS_NOT_IN_USE,
OBJECT_TYPE_HTTP_POLICY_SET, STATUS_LIST, COMPLEXITY_ADVANCED,
COMPLEXITY_BASIC, OBJECT_TYPE_APPLICATION_PERSISTENCE_PROFILE,
OBJECT_TYPE_APPLICATION_PROFILE)
from avi.migrationtools.avi_migration_utils import MigrationUtil, update_count
LOG = logging.getLogger(__name__)
csv_writer_dict_list = []
skipped_setting = {
}
progressbar_count = 0
total_count = 0
class NsUtil(MigrationUtil):
def add_conv_status(self, line_no, cmd, object_type, full_command, conv_status,
avi_object=None):
row = {
'Line Number': line_no if line_no else '',
'Netscaler Command': cmd if cmd else '',
'Object Name': object_type if object_type else '',
'Full Command': full_command if full_command else '',
'Status': conv_status.get('status', ''),
'Skipped settings': str(conv_status.get('skipped', '')),
'Indirect mapping': str(conv_status.get('indirect', '')),
'Not Applicable': str(conv_status.get('na_list', '')),
'User Ignored': str(conv_status.get('user_ignore', '')),
'AVI Object': str(avi_object) if avi_object else ''
}
csv_writer_dict_list.append(row)
def add_complete_conv_status(self, ns_config, output_dir, avi_config,
report_name, vs_level_status):
global csv_writer_dict_list
global progressbar_count
global total_count
print("Generating Report For Converted Configuration...")
ptotal = len(ns_config)
ppcount = 0
for config_key in ns_config:
ppcount += 1
config_object = ns_config[config_key]
msg = "Generating report"
self.print_progress_bar(ppcount, ptotal, msg, prefix='Progress',
suffix='')
for element_key in config_object:
element_object_list = config_object[element_key]
if isinstance(element_object_list, dict):
element_object_list = [element_object_list]
for element_object in element_object_list:
match = [match for match in csv_writer_dict_list if
match['Line Number'] == element_object['line_no']]
if not match:
ns_complete_command = self.get_netscalar_full_command(
config_key, element_object)
self.add_status_row(
element_object['line_no'], config_key,
element_object['attrs'][0], ns_complete_command,
STATUS_INCOMPLETE_CONFIGURATION)
unique_line_number_list = set()
row_list = []
for dict_row in csv_writer_dict_list:
if dict_row['Line Number'] not in unique_line_number_list:
unique_line_number_list.add(dict_row['Line Number'])
row_list.append(dict_row)
else:
row = [row for row in row_list
if row['Line Number'] == dict_row['Line Number']]
if str(dict_row['AVI Object']).startswith('Skipped'):
continue
if dict_row.get('AVI Object', None):
if str(row[0]['AVI Object']) != str(dict_row['AVI Object']):
row[0]['AVI Object'] += '__/__%s' % dict_row[
'AVI Object']
for status in STATUS_LIST:
status_list = [row for row in row_list if
row['Status'] == status]
print('%s: %s' % (status, len(status_list)))
print("Writing Excel Sheet For Converted Configuration...")
total_count = total_count + len(row_list)
if vs_level_status:
self.vs_per_skipped_setting_for_references(avi_config)
self.correct_vs_ref(avi_config)
else:
self.vs_complexity_level()
self.write_status_report_and_pivot_table_in_xlsx(
row_list, output_dir, report_name, vs_level_status)
def add_status_row(self, line_no, cmd, object_type, full_command, status,
avi_object=None):
global csv_writer_dict_list
row = {
'Line Number': line_no if line_no else '',
'Netscaler Command': cmd,
'Object Name': object_type,
'Full Command': full_command,
'Status': status,
'AVI Object': str(avi_object) if avi_object else ''
}
csv_writer_dict_list.append(row)
def add_csv_headers(self, csv_file):
global csv_writer
fieldnames = ['Line Number', 'Netscaler Command', 'Object Name',
'Full Command', 'Status', 'Skipped settings',
'Indirect mapping', 'Not Applicable', 'User Ignored',
'AVI Object']
csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames,
lineterminator='\n', )
csv_writer.writeheader()
def get_avi_lb_algorithm(self, ns_algorithm):
avi_algorithm = 'LB_ALGORITHM_LEAST_CONNECTIONS'
if ns_algorithm == 'LEASTCONNECTIONS':
avi_algorithm = 'LB_ALGORITHM_LEAST_CONNECTIONS'
elif ns_algorithm == 'ROUNDROBIN':
avi_algorithm = 'LB_ALGORITHM_ROUND_ROBIN'
elif ns_algorithm in ['LEASTRESPONSETIME', 'LRTM']:
avi_algorithm = 'LB_ALGORITHM_FASTEST_RESPONSE'
elif ns_algorithm == 'SOURCEIPHASH':
avi_algorithm = 'LB_ALGORITHM_CONSISTENT_HASH'
elif ns_algorithm == 'URLHASH':
avi_algorithm = 'LB_ALGORITHM_CONSISTENT_HASH_URI'
return avi_algorithm
def update_algo_for_pools(self, algo, pg_name, avi_config):
pool_group = [pg for pg in avi_config['PoolGroup'] if
pg['name'] == pg_name][0]
for member in pool_group['members']:
pool_name = self.get_name(member['pool_ref'])
pool = [pool for pool in avi_config['Pool'] if
pool['name'] == pool_name][0]
pool['lb_algorithm'] = algo
def get_avi_resp_code(self, respCode):
avi_resp_codes = []
codes = []
for res_code in respCode.split(' '):
if '-' in res_code:
codes.extend(res_code.split('-'))
else:
codes.append(res_code)
for code in codes:
if code and code.strip().isdigit():
code = int(code.strip())
if code < 200:
avi_resp_codes.append("HTTP_1XX")
elif code < 300:
avi_resp_codes.append("HTTP_2XX")
elif code < 400:
avi_resp_codes.append("HTTP_3XX")
elif code < 500:
avi_resp_codes.append("HTTP_4XX")
elif code < 600:
avi_resp_codes.append("HTTP_5XX")
avi_resp_codes = list(set(avi_resp_codes))
if not avi_resp_codes:
avi_resp_codes = ["HTTP_ANY"]
return avi_resp_codes
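    # Editor's sketch (hypothetical input, not part of the original module):
    #   >>> util.get_avi_resp_code('200 301-404')
    #   ['HTTP_2XX', 'HTTP_3XX', 'HTTP_4XX']  # order may vary (set-based)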
def get_conv_status(self, ns_object, skipped_list, na_list, indirect_list,
ignore_for_val=None, indirect_commands=None,
user_ignore_val=[]):
skipped = [attr for attr in ns_object.keys() if attr in skipped_list]
na = [attr for attr in ns_object.keys() if attr in na_list]
indirect = [attr for attr in ns_object.keys() if attr in indirect_list]
user_ignore = [val for val in skipped if val in user_ignore_val]
skipped = [attr for attr in skipped if attr not in user_ignore_val]
if ignore_for_val:
for key in ignore_for_val.keys():
if key not in ns_object:
continue
ns_val = ns_object.get(key)
ignore_val = ignore_for_val.get(key)
if key in skipped and str(ns_val) == str(ignore_val):
skipped.remove(key)
if skipped:
status = STATUS_PARTIAL
else:
status = STATUS_SUCCESSFUL
conv_status = {
'skipped': skipped,
'indirect': indirect,
'na_list': na,
'status': status,
'user_ignore': user_ignore
}
return conv_status
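    # Editor's sketch (hypothetical input, not part of the original module):
    # for ns_object={'persistenceType': 'SOURCEIP', 'cltTimeout': '180'} and
    # skipped_list=['cltTimeout'] (na/indirect lists empty), the result is
    #   {'skipped': ['cltTimeout'], 'indirect': [], 'na_list': [],
    #    'status': STATUS_PARTIAL, 'user_ignore': []}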
def get_key_cert_obj(self, name, key_file_name, cert_file_name, input_dir):
folder_path = input_dir + os.path.sep
key = self.upload_file(folder_path + key_file_name)
cert = self.upload_file(folder_path + cert_file_name)
ssl_kc_obj = None
if key and cert:
cert = {"certificate": cert}
ssl_kc_obj = {
'name': name,
'key': key,
'certificate': cert,
'key_passphrase': ''
}
return ssl_kc_obj
def get_command_from_line(self, line):
cmd = ''
line_no = 0
for member in line:
if 'line_no' in member:
line_no = member[1]
continue
if isinstance(member, str):
cmd += ' %s' % member
else:
cmd += ' -%s' % ' '.join(member)
return cmd, line_no
def update_status_for_skipped(self, skipped_cmds):
na_cmds = ns_constants.netscalar_command_status['NotApplicableCommands']
indirect_cmds = ns_constants.netscalar_command_status[
'IndirectCommands']
datascript_cmds = \
ns_constants.netscalar_command_status['DatascriptCommands']
not_supported = ns_constants.netscalar_command_status['NotSupported']
if not skipped_cmds:
return
for cmd in skipped_cmds:
line_no = cmd['line_no']
cmd = cmd['cmd']
cmd = cmd.strip()
for na_cmd in na_cmds:
if cmd.startswith(na_cmd):
self.add_status_row(line_no, na_cmd, None, cmd,
STATUS_NOT_APPLICABLE)
break
for id_cmd in indirect_cmds:
if cmd.startswith(id_cmd):
self.add_status_row(line_no, id_cmd, None, cmd, STATUS_INDIRECT)
break
for datascript_cmd in datascript_cmds:
if cmd.startswith(datascript_cmd):
self.add_status_row(line_no, datascript_cmd, None, cmd,
STATUS_DATASCRIPT)
break
for not_commands in not_supported:
if cmd.startswith(not_commands):
self.add_status_row(line_no, not_commands, None, cmd,
STATUS_COMMAND_NOT_SUPPORTED)
break
def remove_duplicate_objects(self, obj_type, obj_list):
if len(obj_list) == 1:
return obj_list
for source_obj in obj_list:
for index, tmp_obj in enumerate(obj_list):
if tmp_obj["name"] == source_obj["name"]:
continue
src_cp = copy.deepcopy(source_obj)
tmp_cp = copy.deepcopy(tmp_obj)
del src_cp["name"]
if "description" in src_cp:
del src_cp["description"]
del tmp_cp["name"]
if "description" in tmp_cp:
del tmp_cp["description"]
if src_cp.items() == tmp_cp.items():
                    LOG.warning('Removing duplicate %s object: %s' % (
                        obj_type, tmp_obj["name"]))
del obj_list[index]
self.remove_duplicate_objects(obj_type, obj_list)
return obj_list
def cleanup_config(self, config):
del config
def clone_pool(self, pool_name, cloned_for, avi_config, userprefix=None):
pools = [pool for pool in avi_config['Pool'] if
pool['name'] == pool_name]
if pools:
pool_obj = copy.deepcopy(pools[0])
pname = pool_obj['name']
pool_name = re.sub('[:]', '-', '%s-%s' % (pname, cloned_for))
pool_obj['name'] = pool_name
avi_config['Pool'].append(pool_obj)
LOG.info(
"Same pool reference to other object. Clone Pool %s for %s" %
(pool_name, cloned_for))
return pool_obj['name']
return None
def get_vs_if_shared_vip(self, avi_config, controller_version):
vs_list = [v for v in avi_config['VirtualService'] if
'port_range_end' in
v['services'][0]]
for vs in vs_list:
if parse_version(controller_version) >= parse_version('17.1'):
vs_port_list = [int(v['services'][0]['port']) for v in
avi_config['VirtualService']
if v['vsvip_ref'].split('name=')[1].split('-')[0] ==
vs['vsvip_ref'].split('name=')[1].split('-')[0]
and 'port_range_end' not in v['services'][0]]
else:
vs_port_list = [int(v['services'][0]['port']) for v in
avi_config['VirtualService'] if v['ip_address'][
'addr'] == vs['ip_address']['addr'] and
'port_range_end' not in v['services'][0]]
if vs_port_list:
min_port = min(vs_port_list)
max_port = max(vs_port_list)
vs['services'][0]['port_range_end'] = str(min_port - 1)
service = {
'enable_ssl': False,
'port': str(max_port + 1),
'port_range_end': '65535'
}
vs['services'].append(service)
def add_prop_for_http_profile(self, profile_name, avi_config, sysdict,
prop_dict):
profile = [p for p in (avi_config['ApplicationProfile'] + sysdict[
'ApplicationProfile']) if p['name'] == profile_name]
if profile:
if prop_dict.get('clttimeout'):
profile[0]['client_header_timeout'] = int(prop_dict[
'clttimeout'])
profile[0]['client_body_timeout'] = int(prop_dict['clttimeout'])
if prop_dict.get('xff_enabled'):
if profile[0].get('http_profile'):
profile[0]['http_profile'].update(
{
'xff_enabled': True,
'xff_alternate_name': 'X-Forwarded-For'
}
)
else:
profile[0].update({'http_profile':
{
'xff_enabled': True,
'xff_alternate_name': 'X-Forwarded-For'
}
})
if profile[0].get('http_profile'):
profile[0]['http_profile'].update(
{
'x_forwarded_proto_enabled': True,
'hsts_enabled': True,
'http_to_https': True,
'httponly_enabled': True,
'hsts_max_age': 365,
'server_side_redirect_to_https': True,
'secure_cookie_enabled': True
}
)
else:
profile[0].update({'http_profile':
{
'x_forwarded_proto_enabled': True,
'hsts_enabled': True,
'http_to_https': True,
'httponly_enabled': True,
'hsts_max_age': 365,
'server_side_redirect_to_https': True,
'secure_cookie_enabled': True
}
})
def object_exist(self, object_type, name, avi_config):
data = avi_config[object_type]
obj_list = [obj for obj in data if obj['name'] == name]
if obj_list:
return True
return False
def is_shared_same_vip(self, vs, cs_vs_list, avi_config, tenant_name,
cloud_name, tenant_ref, cloud_ref,
controller_version, prefix, input_vrf=None):
if parse_version(controller_version) >= parse_version('17.1'):
shared_vip = [v for v in cs_vs_list if v['vsvip_ref'
].split('name=')[1].split('-')[0] == vs['vsvip_ref'
].split('name=')[1].split('-')[0] and
v['services'][0][
'port'] == vs['services'][0]['port']]
else:
shared_vip = [v for v in cs_vs_list if v['ip_address']['addr'] ==
vs['ip_address']['addr'] and v['services'][0][
'port'] ==
vs['services'][0]['port']]
if input_vrf:
vrf_ref = self.get_object_ref(input_vrf, 'vrfcontext',
cloud_name=cloud_name)
else:
vrf_ref = self.get_object_ref('global', 'vrfcontext',
cloud_name=cloud_name)
if shared_vip:
return True
elif parse_version(controller_version) >= parse_version('17.1'):
vsvip = vs['vsvip_ref'].split('name=')[1].split('-')[0]
self.create_update_vsvip(vsvip, avi_config['VsVip'], tenant_ref,
cloud_ref, prefix=prefix, vrf_ref=vrf_ref)
name = vsvip + '-vsvip'
if prefix:
name = prefix + '-' + vsvip + '-vsvip'
updated_vsvip_ref = self.get_object_ref(
name, 'vsvip', tenant_name, cloud_name)
vs['vsvip_ref'] = updated_vsvip_ref
def clone_http_policy_set(self, policy, prefix, avi_config, tenant_name,
cloud_name, used_poolgrp_ref, userprefix=None):
policy_name = policy['name']
clone_policy = copy.deepcopy(policy)
for rule in clone_policy['http_request_policy']['rules']:
if rule.get('switching_action', None) and \
rule['switching_action'].get('pool_group_ref'):
pool_group_ref = \
rule['switching_action']['pool_group_ref'].split('&')[
1].split(
'=')[1]
if pool_group_ref in used_poolgrp_ref:
LOG.debug('Cloned the pool group for policy %s',
policy_name)
pool_group_ref = self.clone_pool_group(
pool_group_ref, policy_name, avi_config, tenant_name,
cloud_name, userprefix=userprefix)
if pool_group_ref:
updated_pool_group_ref = self.get_object_ref(
pool_group_ref, OBJECT_TYPE_POOL_GROUP, tenant_name,
cloud_name)
rule['switching_action']['pool_group_ref'] = \
updated_pool_group_ref
clone_policy['name'] += '-%s-clone' % prefix
return clone_policy
def set_rules_index_for_http_policy_set(self, avi_config):
http_policy_sets = avi_config['HTTPPolicySet']
for http_policy_set in http_policy_sets:
rules = http_policy_set['http_request_policy']['rules']
rules = sorted(rules, key=lambda d: int(d['index']))
for index, rule in enumerate(rules):
rule['index'] = index
def get_netscalar_full_command(self, netscalar_command, obj):
for attr in obj['attrs']:
netscalar_command += ' %s' % attr
for key in obj:
if isinstance(obj[key], list):
continue
if key == 'line_no':
continue
netscalar_command += ' -%s %s' % (key, obj[key])
return netscalar_command
def clone_pool_group(self, pg_name, cloned_for, avi_config, tenant_name,
cloud_name, userprefix=None):
pool_groups = [pg for pg in avi_config['PoolGroup']
if pg['name'] == pg_name]
if pool_groups:
pool_group = copy.deepcopy(pool_groups[0])
pool_group_name = re.sub('[:]', '-',
'%s-%s' % (pg_name, cloned_for))
pool_group['name'] = pool_group_name
for member in pool_group.get('members', []):
pool_ref = self.get_name(member['pool_ref'])
pool_ref = self.clone_pool(pool_ref, cloned_for, avi_config,
userprefix=userprefix)
if pool_ref:
updated_pool_ref = self.get_object_ref(
pool_ref, OBJECT_TYPE_POOL, tenant_name, cloud_name)
member['pool_ref'] = updated_pool_ref
avi_config['PoolGroup'].append(pool_group)
LOG.info(
"Same pool group reference to other object. Clone Pool group "
"%s for %s" % (pg_name, cloned_for))
return pool_group['name']
return None
def remove_http_mon_from_pool(self, avi_config, pool, sysdict):
if pool:
hm_refs = copy.deepcopy(pool['health_monitor_refs'])
for hm_ref in hm_refs:
hm = [h for h in (sysdict['HealthMonitor'] + avi_config[
'HealthMonitor']) if h['name'] == hm_ref]
if hm and hm[0]['type'] == 'HEALTH_MONITOR_HTTP':
pool['health_monitor_refs'].remove(hm_ref)
LOG.warning(
                        'Skipping health monitor reference %s from pool %s '
                        'because its type is HTTP and the VS has an ssl '
                        'profile.' % (hm_ref, pool['name']))
def remove_https_mon_from_pool(self, avi_config, pool, sysdict):
if pool:
hm_refs = copy.deepcopy(pool['health_monitor_refs'])
for hm_ref in hm_refs:
hm = [h for h in (sysdict['HealthMonitor'] + avi_config[
'HealthMonitor']) if h['name'] == hm_ref]
if hm and hm[0]['type'] == 'HEALTH_MONITOR_HTTPS':
pool['health_monitor_refs'].remove(hm_ref)
LOG.warning(
                        'Skipping health monitor reference %s from pool %s '
                        'because its type is HTTPS and the VS has no ssl '
                        'profile.' % (hm_ref, pool['name']))
def update_application_profile(self, profile_name, pki_profile_ref,
tenant_ref, name, avi_config, sysdict):
try:
if profile_name:
app_profile = [p for p in (sysdict['ApplicationProfile'] +
avi_config['ApplicationProfile']) if
p['name'] ==
profile_name]
if app_profile:
app_profile[0]["http_profile"]['pki_profile_ref'] = \
pki_profile_ref
                LOG.debug('Added PKI profile %s to application profile '
                          '%s successfully' % (
                              pki_profile_ref, profile_name))
else:
app_profile = dict()
app_profile['name'] = name + '-%s-%s' % (
random.randrange(0, 1000),
ns_constants.PLACE_HOLDER_STR)
app_profile['tenant_ref'] = tenant_ref
app_profile['type'] = 'APPLICATION_PROFILE_TYPE_HTTP'
http_profile = dict()
http_profile['connection_multiplexing_enabled'] = False
http_profile['xff_enabled'] = False
http_profile['websockets_enabled'] = False
http_profile['pki_profile_ref'] = pki_profile_ref
app_profile["http_profile"] = http_profile
avi_config['ApplicationProfile'].append(app_profile)
LOG.debug(
"Conversion completed successfully for httpProfile: %s" %
app_profile['name'])
return app_profile['name']
        except Exception:
            update_count('error')
            LOG.error("Error in conversion of httpProfile", exc_info=True)
def convert_persistance_prof(self, vs, name, tenant_ref):
profile = None
persistenceType = vs.get('persistenceType', '')
if persistenceType == 'COOKIEINSERT':
timeout = vs.get('timeout', 2)
profile = {
"http_cookie_persistence_profile": {
"always_send_cookie": False
},
"persistence_type": "PERSISTENCE_TYPE_HTTP_COOKIE",
"server_hm_down_recovery": "HM_DOWN_PICK_NEW_SERVER",
"name": name,
}
if int(timeout) > 0:
profile['http_cookie_persistence_profile']["timeout"] = timeout
elif persistenceType == 'SOURCEIP':
timeout = vs.get('timeout', 120)
timeout = int(timeout) / 60
if timeout < 1:
timeout = 1
profile = {
"server_hm_down_recovery": "HM_DOWN_PICK_NEW_SERVER",
"persistence_type": "PERSISTENCE_TYPE_CLIENT_IP_ADDRESS",
"ip_persistence_profile": {
"ip_persistent_timeout": timeout
},
"name": name
}
elif persistenceType == 'SSLSESSION':
profile = {
"server_hm_down_recovery": "HM_DOWN_PICK_NEW_SERVER",
"persistence_type": "PERSISTENCE_TYPE_TLS",
"name": name
}
profile['tenant_ref'] = tenant_ref
return profile
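    # Editor's sketch (hypothetical VS, not part of the original module): for
    # vs={'persistenceType': 'SOURCEIP', 'timeout': '300'} the profile comes
    # back as PERSISTENCE_TYPE_CLIENT_IP_ADDRESS with ip_persistent_timeout 5
    # (300 seconds converted to minutes).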
    def update_status_target_lb_vs_to_indirect(self, target_lb_vs):
        global csv_writer_dict_list
        row = [row for row in csv_writer_dict_list
               if row['Object Name'] == target_lb_vs
and row['Netscaler Command'] == 'add lb vserver']
if row:
row[0]['Status'] = STATUS_INDIRECT
def create_http_policy_set_for_redirect_url(self, vs_obj, redirect_uri,
avi_config, tenant_name, tenant_ref, enable_ssl):
redirect_uri = str(redirect_uri).replace('"', '')
action = self.build_redirect_action_dict(redirect_uri, enable_ssl)
policy_obj = {
'name': vs_obj['name'] + '-redirect-policy',
'tenant_ref': tenant_ref,
'http_request_policy': {
'rules': [
{
'index': 0,
'name': vs_obj['name'] + '-redirect-policy-rule-0',
'match': {
'path': {
'match_case': 'INSENSITIVE',
'match_str': [
'/'
],
'match_criteria': 'EQUALS'
}
},
'redirect_action': action
}
]
}
}
updated_http_policy_ref = self.get_object_ref(policy_obj['name'],
OBJECT_TYPE_HTTP_POLICY_SET,
tenant_name)
http_policies = {
'index': 11,
'http_policy_set_ref': updated_http_policy_ref
}
if not vs_obj.get('http_policies'):
vs_obj['http_policies'] = []
else:
ind = max([policies['index'] for policies in vs_obj[
'http_policies']])
http_policies['index'] = ind + 1
vs_obj['http_policies'].append(http_policies)
avi_config['HTTPPolicySet'].append(policy_obj)
def clean_virtual_service_from_avi_config(self, avi_config,
controller_version):
vs_list = copy.deepcopy(avi_config['VirtualService'])
avi_config['VirtualService'] = []
if parse_version(controller_version) >= parse_version('17.1'):
avi_config['VirtualService'] = \
[vs for vs in vs_list
if vs['vsvip_ref'].split('name=')[1].split('-')[0] != '0.0.0.0']
else:
avi_config['VirtualService'] = \
[vs for vs in vs_list
if vs['ip_address']['addr'] != '0.0.0.0']
def parse_url(self, url):
parsed = urlparse(url)
return parsed
def format_string_to_json(self, avi_string):
avi_string = avi_string.split('__/__')[0]
return ast.literal_eval(avi_string)
def get_csv_object_list(self, csv_writer_dict_list, command_list):
csv_object = [row for row in
csv_writer_dict_list
if row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]
and row['Netscaler Command'] in
command_list]
return csv_object
def get_csv_skipped_list(self, csv_object, name_of_object, vs_ref):
skipped_list = []
for each_partial in csv_object:
avi_object_json = \
self.format_string_to_json(each_partial['AVI Object'])
if avi_object_json.get('name') and \
avi_object_json['name'] == name_of_object:
# Set the VS reference for Netscaler status row
each_partial['VS Reference'] = vs_ref
repls = ('[', ''), (']', '')
skipped_setting_csv = reduce(lambda a, kv: a.replace(*kv),
repls,
each_partial['Skipped settings'])
if skipped_setting_csv:
skipped_list.append(skipped_setting_csv)
return skipped_list
def get_ssl_key_and_cert_refs_skipped(self, csv_writer_dict_list,
object_name, vs_ref):
ssl_key_cert = \
self.get_name(object_name['ssl_key_and_certificate_refs'][0])
csv_object = self.get_csv_object_list(
csv_writer_dict_list, ['bind ssl vserver', 'bind ssl service',
'bind ssl serviceGroup'])
skipped_list = self.get_csv_skipped_list(csv_object, ssl_key_cert,
vs_ref)
return ssl_key_cert, skipped_list
def get_ssl_profile_skipped(self, csv_writer_dict_list, ssl_profile_ref,
vs_ref):
ssl_profile_name = self.get_name(ssl_profile_ref)
csv_object = \
self.get_csv_object_list(csv_writer_dict_list,
['set ssl vserver', 'set ssl service',
'set ssl serviceGroup'])
skipped_list = self.get_csv_skipped_list(csv_object, ssl_profile_name,
vs_ref)
return ssl_profile_name, skipped_list
def get_application_profile_skipped(self, csv_writer_dict_list,
name_of_object, vs_ref):
ssl_profile_name = self.get_name(
name_of_object['application_profile_ref'])
csv_object = self.get_csv_object_list(
csv_writer_dict_list, ['add ns httpProfile'])
skipped_list = self.get_csv_skipped_list(csv_object, ssl_profile_name,
vs_ref)
return ssl_profile_name, skipped_list
def get_network_profile_skipped(self, csv_writer_dict_list, name_of_object,
vs_ref):
ssl_profile_name = self.get_name(name_of_object['network_profile_ref'])
csv_object = self.get_csv_object_list(
csv_writer_dict_list, ['add ns tcpProfile'])
skipped_list = self.get_csv_skipped_list(csv_object, ssl_profile_name,
vs_ref)
return ssl_profile_name, skipped_list
def get_app_persistence_profile_skipped(self, csv_writer_dict_list,
name_of_object, vs_ref):
# Changed ssl profile name to ssl profile ref.
app_persistence_profile_name = self.get_name(
name_of_object['ssl_profile_ref'])
csv_object = self.get_csv_object_list(csv_writer_dict_list, ['set lb group'])
skipped_list = self.get_csv_skipped_list(
csv_object, app_persistence_profile_name, vs_ref)
return app_persistence_profile_name, skipped_list
def get_pool_skipped_list(self, avi_config, pool_group_name,
skipped_setting, csv_object, obj_name,
csv_writer_dict_list, vs_ref):
pool_group_object_ref = [pool_group_object_ref for pool_group_object_ref
in avi_config['PoolGroup'] if
pool_group_object_ref[
'name'] == pool_group_name]
for pool_group in pool_group_object_ref:
if 'members' in pool_group:
for each_pool_ref in pool_group['members']:
pool_name = self.get_name(each_pool_ref['pool_ref'])
skipped_list = self.get_csv_skipped_list(csv_object, pool_name,
vs_ref)
if len(skipped_list) > 0:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[obj_name]['pool']['pool_skipped_list'] \
= skipped_list
for pool_partial in csv_object:
avi_object_json = self.format_string_to_json(
pool_partial['AVI Object'])
if avi_object_json['name'] == pool_name:
if 'health_monitor_refs' in avi_object_json and \
avi_object_json['health_monitor_refs']:
monitor_refs = \
avi_object_json['health_monitor_refs']
for monitor_ref in monitor_refs:
monitor_ref = self.get_name(monitor_ref)
csv_object = self.get_csv_object_list(
csv_writer_dict_list,
['add lb monitor'])
skipped_list = self.get_csv_skipped_list(
csv_object, monitor_ref, vs_ref)
if skipped_list:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[obj_name]['pool'][
'health monitor'] = {}
skipped_setting[obj_name]['pool'][
'health monitor'][
'name'] = monitor_ref
skipped_setting[obj_name]['pool'][
'health monitor']['skipped_list'] =\
skipped_list
if 'ssl_key_and_certificate_refs' in avi_object_json:
name, skipped = \
self.get_ssl_key_and_cert_refs_skipped(
csv_writer_dict_list, avi_object_json,
vs_ref)
if skipped:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[
obj_name]['pool'][
'ssl key and cert'] = {}
skipped_setting[
obj_name]['pool']['ssl key and cert'][
'name'] = name
skipped_setting[
obj_name]['pool']['ssl key and cert'][
'skipped_list'] = skipped
if 'ssl_profile_ref' in avi_object_json:
name, skipped = \
self.get_ssl_profile_skipped(
csv_writer_dict_list, avi_object_json[
'ssl_profile_ref'], vs_ref)
if skipped:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[obj_name]['pool'][
'ssl profile'] = {}
skipped_setting[obj_name]['pool'][
'ssl profile']['name'] = name
skipped_setting[obj_name]['pool'][
'ssl profile']['skipped_list'] = skipped
# Get the skipped settings of application
# persistence profile ref.
if 'application_persistence_profile_ref' in \
avi_object_json:
name, skipped = \
self.get_app_persistence_profile_skipped(
csv_writer_dict_list, avi_object_json,
vs_ref)
if skipped:
skipped_setting[obj_name] = {}
skipped_setting[obj_name]['pool'] = {}
skipped_setting[obj_name]['pool'][
'pool_name'] = pool_name
skipped_setting[obj_name]['pool'][
'Application Persistence profile'] = {}
skipped_setting[obj_name]['pool'][
'Application Persistence profile'][
'name'] = name
skipped_setting[obj_name]['pool'][
'Application Persistence profile'][
'skipped_list'] = skipped
def vs_complexity_level(self):
vs_csv_objects = [row for row in csv_writer_dict_list
if
row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]
and row['Netscaler Command'] in [
'add cs vserver', 'add lb vserver']]
for vs_csv_object in vs_csv_objects:
virtual_service = self.format_string_to_json(
vs_csv_object['AVI Object'])
# Update the complexity level of VS as Basic or Advanced
self.update_vs_complexity_level(vs_csv_object, virtual_service)
def vs_per_skipped_setting_for_references(self, avi_config):
        # Get the count of vs successfully migrated
global fully_migrated
global total_count
global progressbar_count
fully_migrated = 0
        # Get the VS object list whose status is successful or partial.
vs_csv_objects = [row for row in csv_writer_dict_list
if
row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]
and row['Netscaler Command'] in [
'add cs vserver', 'add lb vserver']]
# calculate total count
total_count = total_count + len(vs_csv_objects)
for vs_csv_object in vs_csv_objects:
progressbar_count += 1
skipped_setting = {}
virtual_service = self.format_string_to_json(
vs_csv_object['AVI Object'])
# Update the complexity level of VS as Basic or Advanced
self.update_vs_complexity_level(vs_csv_object, virtual_service)
vs_ref = virtual_service['name']
repls = ('[', ''), (']', '')
# Get list of skipped setting attributes
skipped_setting_csv = reduce(lambda a, kv: a.replace(*kv), repls,
vs_csv_object['Skipped settings'])
if skipped_setting_csv:
skipped_setting['virtual_service'] = [skipped_setting_csv]
# Get the skipped list for ssl key and cert
if 'ssl_key_and_certificate_refs' in virtual_service:
name, skipped = self.get_ssl_key_and_cert_refs_skipped(
csv_writer_dict_list, virtual_service, vs_ref)
if skipped:
skipped_setting['ssl key and cert'] = {}
skipped_setting['ssl key and cert']['name'] = name
skipped_setting['ssl key and cert'][
'skipped_list'] = skipped
# Get the skipped list for ssl profile name.
# Changed ssl profile name to ssl profile ref.
if 'ssl_profile_ref' in virtual_service:
name, skipped = self.get_ssl_profile_skipped(
csv_writer_dict_list, virtual_service['ssl_profile_ref'],
vs_ref)
if skipped:
skipped_setting['ssl profile'] = {}
skipped_setting['ssl profile']['name'] = name
skipped_setting['ssl profile']['skipped_list'] = skipped
# Get the skipped list for pool group.
if 'pool_group_ref' in virtual_service:
pool_group_name = self.get_name(
virtual_service['pool_group_ref'])
csv_object = self.get_csv_object_list(
csv_writer_dict_list, ['bind lb vserver'])
self.get_pool_skipped_list(
avi_config, pool_group_name, skipped_setting, csv_object,
'pool group', csv_writer_dict_list, vs_ref)
            # Get the skipped list for http policy.
if 'http_policies' in virtual_service:
csv_object = self.get_csv_object_list(
csv_writer_dict_list,
['add cs policy', 'add responder policy',
'add rewrite policy'])
for http_ref in virtual_service['http_policies']:
http_name = self.get_name(http_ref['http_policy_set_ref'])
skipped_list = self.get_csv_skipped_list(csv_object,
http_name,
vs_ref)
if skipped_list:
skipped_setting['Httppolicy'] = {}
skipped_setting['Httppolicy']['name'] = http_name
skipped_setting['Httppolicy'][
'skipped_list'] = skipped_list
# Get the http policy name
for each_http_policy in avi_config['HTTPPolicySet']:
if each_http_policy['name'] == http_name:
for http_req in \
each_http_policy['http_request_policy'][
'rules']:
if http_req.get('switching_action', None) and \
http_req['switching_action'].get(
'pool_group_ref', None):
pool_group_name = self.get_name(
http_req['switching_action']
['pool_group_ref'])
self.get_pool_skipped_list(
avi_config, pool_group_name,
skipped_setting, csv_object,
'Httppolicy',
csv_writer_dict_list, vs_ref)
# Get the skipped list for application_profile_ref.
if 'application_profile_ref' in virtual_service and \
'admin:System' not in \
virtual_service['application_profile_ref']:
name, skipped = self.get_application_profile_skipped(
csv_writer_dict_list, virtual_service, vs_ref)
if skipped:
skipped_setting['Application profile'] = {}
skipped_setting['Application profile'][
'name'] = name
skipped_setting['Application profile'][
'skipped_list'] = skipped
# Get the skipped list for network profile ref.
if 'network_profile_ref' in virtual_service and \
'admin:System' not in \
virtual_service['network_profile_ref']:
name, skipped = self.get_network_profile_skipped(
csv_writer_dict_list, virtual_service, vs_ref)
if skipped:
skipped_setting['Network profile'] = {}
skipped_setting['Network profile'][
'name'] = name
skipped_setting['Network profile'][
'skipped_list'] = skipped
# Update overall skipped setting of VS csv row
if skipped_setting:
vs_csv_object.update(
{'Overall skipped settings': str(skipped_setting)})
else:
vs_csv_object.update(
{'Overall skipped settings': "FULLY MIGRATION"})
fully_migrated += 1
msg = "Writing excel sheet started..."
self.print_progress_bar(progressbar_count, total_count, msg,
prefix='Progress', suffix='')
csv_objects = [row for row in csv_writer_dict_list
if row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]
and row['Netscaler Command'] not in ['add cs vserver',
'add lb vserver']
and (
'VS Reference' not in row or not row[
'VS Reference'])]
        # Mark the VS reference as not in use if objects are not attached to
        # a VS directly or indirectly
for csv_object in csv_objects:
csv_object['VS Reference'] = STATUS_NOT_IN_USE
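    # Editor's note (not part of the original module): after this pass every
    # successful/partial row either lists the VSes that use it in
    # 'VS Reference' or is explicitly marked STATUS_NOT_IN_USE, which feeds
    # the per-VS skipped-settings report written below.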
def write_status_report_and_pivot_table_in_xlsx(self, row_list, output_dir,
report_name, vs_level_status):
global total_count
global progressbar_count
# List of fieldnames for headers
if vs_level_status:
fieldnames = ['Line Number', 'Netscaler Command', 'Object Name',
'Full Command', 'Status', 'Skipped settings',
'Indirect mapping', 'Not Applicable', 'User Ignored',
'Overall skipped settings', 'Complexity Level',
'VS Reference', 'AVI Object']
else:
fieldnames = ['Line Number', 'Netscaler Command', 'Object Name',
'Full Command', 'Status', 'Skipped settings',
'Indirect mapping', 'Not Applicable', 'User Ignored',
                          'Complexity Level', 'AVI Object']
xlsx_report = output_dir + os.path.sep + ("%s-ConversionStatus.xlsx" %
report_name)
# xlsx workbook
status_wb = Workbook(xlsx_report)
# xlsx worksheet
status_ws = status_wb.add_worksheet("Status Sheet")
# Lock the first row of xls report.
status_ws.freeze_panes(1, 0)
first_row = 0
for header in fieldnames:
col = fieldnames.index(header)
status_ws.write(first_row, col, header)
row = 1
for row_data in row_list:
progressbar_count += 1
for _key, _value in row_data.items():
if _key in fieldnames:
col = fieldnames.index(_key)
status_ws.write(row, col, _value)
msg = "Writing excel sheet started..."
self.print_progress_bar(progressbar_count, total_count, msg,
prefix='Progress', suffix='')
row += 1
status_wb.close()
# create dataframe for row list
df = pandas.DataFrame(row_list, columns=fieldnames)
# create pivot table using pandas
pivot_table = pandas.pivot_table(df,
index=["Status", "Netscaler Command"],
values=[], aggfunc=[len], fill_value=0)
# create dataframe for pivot table using pandas
pivot_df = pandas.DataFrame(pivot_table)
master_book = load_workbook(xlsx_report)
master_writer = pandas.ExcelWriter(xlsx_report, engine='openpyxl')
master_writer.book = master_book
# Add pivot table in Pivot sheet
pivot_df.to_excel(master_writer, 'Pivot Sheet')
master_writer.save()
def update_skip_duplicates(self, obj, obj_list, obj_type,
merge_object_mapping, name, ent_type, prefix,
syslist):
dup_of = None
merge_object_mapping[obj_type].update({name: name})
dup_of, old_name = self.check_for_duplicates(obj, obj_list, obj_type,
merge_object_mapping, ent_type,
prefix,
syslist)
if dup_of:
LOG.info(
"Duplicate profiles: %s merged in %s" % (obj['name'], dup_of))
# Update value of ssl profile with merged profile
if old_name in merge_object_mapping[obj_type].keys():
merge_object_mapping[obj_type].update({old_name: dup_of})
merge_object_mapping[obj_type].update({name: dup_of})
return True
return False
def create_update_vsvip(self, vip, vsvip_config, tenant_ref, cloud_ref,
prefix=None, vrf_ref=None):
        # Get the existing vsvip object list if present
name = vip + '-vsvip'
# Added prefix for objects
if prefix:
name = prefix + '-' + name
vsvip = [vip_obj for vip_obj in vsvip_config
if vip_obj['name'] == name]
if vsvip:
diff_ten = [vips for vips in vsvip if vips['tenant_ref'] !=
tenant_ref]
if diff_ten:
LOG.debug('VsVip %s is repeated with vrf %s but different '
'tenant %s', name, self.get_name(vrf_ref) if vrf_ref
else 'None', self.get_name(tenant_ref))
name = ''
# If VSVIP object not present then create new VSVIP object.
else:
vsvip_object = {
"name": name,
"tenant_ref": tenant_ref,
"cloud_ref": cloud_ref,
"vip": [
{
"vip_id": "0",
"ip_address": {
"type": "V4",
"addr": vip
}
}
],
}
if vrf_ref:
vsvip_object["vrf_context_ref"] = vrf_ref
vsvip_config.append(vsvip_object)
def get_redirect_fail_action(self, url):
parsed = urlparse(url)
redirect_fail_action = {
'fail_action': {
'redirect': {
'host': parsed.hostname,
'protocol': str(parsed.scheme).upper(),
'status_code': "HTTP_REDIRECT_STATUS_CODE_302"
},
"type": "FAIL_ACTION_HTTP_REDIRECT"
}
}
if parsed.path:
redirect_fail_action['fail_action']['redirect']['path'] = \
str(parsed.path).replace('"', '')
if parsed.query:
redirect_fail_action['fail_action']['redirect'][
'query'] = parsed.query
return redirect_fail_action
def cleanup_dupof(self, avi_config):
self.remove_dup_key(avi_config["ApplicationProfile"])
self.remove_dup_key(avi_config["NetworkProfile"])
self.remove_dup_key(avi_config["SSLProfile"])
self.remove_dup_key(avi_config['PKIProfile'])
self.remove_dup_key(avi_config["ApplicationPersistenceProfile"])
self.remove_dup_key(avi_config['HealthMonitor'])
def update_profile_ref(self, ref, avi_obj, merge_obj_list):
for obj in avi_obj:
obj_ref = obj.get(ref)
tenant_ref = obj.get('tenant_ref')
if obj_ref:
name = self.get_name(obj_ref)
tenant = self.get_name(tenant_ref)
if name in merge_obj_list:
updated_name = merge_obj_list[name]
if ref == 'application_persistence_profile_ref':
type_cons = OBJECT_TYPE_APPLICATION_PERSISTENCE_PROFILE
if ref == 'application_profile_ref':
type_cons = OBJECT_TYPE_APPLICATION_PROFILE
obj[ref] = self.get_object_ref(updated_name, type_cons,
tenant)
def vs_redirect_http_to_https(self, avi_config, sysdict):
vsrem = {}
LOG.debug("Check started for redirect from HTTP VS to HTTPS VS with "
"no pool")
for vs in avi_config['VirtualService']:
if not vs.get('pool_group_ref') and not vs.get(
'application_profile_ref') and vs.get('services', []) and \
                    not all([s.get('enable_ssl', True) for s in vs['services']])\
and vs.get('http_policies',[]) and vs['http_policies'][
0].get('http_policy_set_ref'):
polname = self.get_name(vs['http_policies'][0][
'http_policy_set_ref'])
pol = [pl for pl in avi_config['HTTPPolicySet'] if pl['name']
== polname]
if pol and pol[0].get('http_request_policy', {}).get('rules',
[]) and pol[0]['http_request_policy']['rules'][0].get(
'redirect_action'):
iplist = [ip['ip_address']['addr'] for ip in vs.get('vip',
[]) if ip.get('ip_address',{}).get('addr')] or (
[vs['ip_address']['addr']] if vs.get(
'ip_address',{}).get('addr') else [])
if iplist:
for nvs in avi_config['VirtualService']:
if vs['name'] != nvs['name'] and [ip for ip in
iplist if ip in ([nip['ip_address']['addr']
for nip in nvs.get('vip', []) if nip.get(
'ip_address',{}).get('addr')] or [nvs[
'ip_address']['addr'] if nvs.get(
'ip_address',{}).get('addr') else []])]:
appname = self.get_name(nvs[
'application_profile_ref']) if \
nvs.get('application_profile_ref') \
else None
if appname == 'ns-migrate-http':
LOG.debug("%s has redirect to %s, hence "
"removing %s" % (vs['name'],
nvs['name'], vs['name']))
vsrem[vs['name']] = nvs['name']
appprof = [pr for pr in (avi_config[
'ApplicationProfile'] + sysdict[
'ApplicationProfile']) if pr['name']
== appname]
if appprof and appprof[0]['type'] == \
'APPLICATION_PROFILE_TYPE_HTTP':
if appprof[0].get('http_profile'):
appprof[0]['http_profile'][
'http_to_https'] = True
else:
appprof[0]['http_profile'] = {
'http_to_https': True}
LOG.debug("%s has redirect to %s, hence "
"setting 'http_to_https' as true "
"and removing %s" %(vs['name'],
nvs['name'], vs['name']))
vsrem[vs['name']] = nvs['name']
if [True for ssl in nvs['services'] if ssl[
'enable_ssl']] and \
[True for ssl_vs in vs['services']
if not ssl_vs['enable_ssl']]:
nvs['services'].append(vs['services'][0])
vsrem[vs['name']] = nvs['name']
LOG.debug("Check completed for redirect from HTTP VS to HTTPS VS with "
"no pool")
if vsrem:
avi_config['VirtualService'] = [v for v in avi_config[
'VirtualService'] if v['name'] not
in vsrem.keys()]
LOG.debug('%s VS got removed from AVI configuration' % str(len(
vsrem)))
for cl in csv_writer_dict_list:
if cl['Object Name'] in vsrem.keys() and cl[
'Netscaler Command'] in ['add lb vserver', 'add cs vserver']:
cl['Status'] = STATUS_INDIRECT
cl['AVI Object'] = 'Redirected to %s' % vsrem[cl[
'Object Name']]
def merge_pool(self, avi_config):
        mergelist = []
        for poolgrp in avi_config['PoolGroup']:
pool_member = [obj for obj in poolgrp['members'] if not
obj.get('priority_label', '10') == '2']
length = len(pool_member)
for count in range(length):
pool_name = pool_member[count]['pool_ref'].split(
'&')[1].split('=')[1]
if pool_name in mergelist:
continue
pool = [pl for pl in avi_config['Pool']
if pl['name'] == pool_name]
if not pool:
LOG.debug("'%s' not present" % pool_name)
continue
for count2 in range(count+1, length):
pname = pool_member[count2]['pool_ref'].split(
'&')[1].split('=')[1]
nextpool = [pol for pol in avi_config['Pool']
if pol['name'] == pname]
if not nextpool:
LOG.debug("'%s' not present" % pname)
continue
                    # Note: list.sort() returns None, so the original
                    # comparison was always True; compare sorted copies.
                    if sorted(pool[0]['health_monitor_refs']) == sorted(
                            nextpool[0]['health_monitor_refs']):
LOG.debug("Merging pool '%s' in '%s'" % (nextpool[0][
'name'], pool[0]['name']))
ip_port = set()
for ser in pool[0]['servers']:
ip_port.add(str(ser['ip']['addr']) + ':' + str(
ser['port']))
for server in nextpool[0]['servers']:
ipport = str(server['ip']['addr']) + ':' + str(
server['port'])
if ipport not in list(ip_port):
pool[0]['servers'].append(server)
for cl in csv_writer_dict_list:
if cl['Object Name'] == (nextpool[0][
'name'].replace('-pool','')) and cl[
'Netscaler Command'] in ['add service',
'add serviceGroup']:
cl['AVI Object'] = 'Merged to %s' % pool[0][
'name']
mergelist.append(nextpool[0]['name'])
for plg in avi_config['PoolGroup']:
plg['members'] = [member for member in plg['members'] if
member['pool_ref'].split('&')[1].split('=')[1] not
in mergelist]
avi_config['Pool'] = [pools for pools in avi_config['Pool'] if pools[
'name'] not in mergelist]
def add_policy(self, policy, updated_vs_name, avi_config, tmp_policy_ref,
vs_obj, tenant_name, cloud_name, prefix, used_poolgrp_ref):
if policy['name'] in tmp_policy_ref:
policy = self.clone_http_policy_set(policy, updated_vs_name,
avi_config, tenant_name, cloud_name, used_poolgrp_ref,
userprefix=prefix)
updated_http_policy_ref = self.get_object_ref(policy['name'],
OBJECT_TYPE_HTTP_POLICY_SET, tenant_name)
tmp_policy_ref.append(policy['name'])
http_policies = {
'index': 11,
'http_policy_set_ref': updated_http_policy_ref
}
if not vs_obj.get('http_policies'):
vs_obj['http_policies'] = []
else:
ind = max([policies['index'] for policies in vs_obj[
'http_policies']])
http_policies['index'] = ind + 1
vs_obj['http_policies'].append(http_policies)
avi_config['HTTPPolicySet'].append(policy)
def build_redirect_action_dict(self, redirect_url, enable_ssl):
redirect_url = self.parse_url(redirect_url)
protocol = str(redirect_url.scheme).upper()
hostname = str(redirect_url.hostname)
pathstring = str(redirect_url.path)
querystring = str(redirect_url.query)
full_path = '%s?%s' % (pathstring, querystring) if pathstring and \
querystring else pathstring
        # Keep the parsed scheme when present; otherwise fall back to HTTPS or
        # HTTP based on enable_ssl. (The original and/or chain yielded 'HTTPS'
        # whenever enable_ssl was true, even when the URL had a scheme.)
        if not protocol:
            protocol = 'HTTPS' if enable_ssl else 'HTTP'
action = {
'protocol': protocol
}
if hostname:
action.update({'host':
{
'type': 'URI_PARAM_TYPE_TOKENIZED',
'tokens': [{
'type': 'URI_TOKEN_TYPE_STRING',
'str_value': hostname,
'start_index': '0',
'end_index': '65535'
}]
}
})
if full_path:
action.update({'path':
{
'type': 'URI_PARAM_TYPE_TOKENIZED',
'tokens': [{
'type': 'URI_TOKEN_TYPE_STRING',
'str_value': full_path,
'start_index': '0',
'end_index': '65535'
}]
}
})
return action
def create_http_to_https_custom_profile(self):
return {
'name': "ns-migrate-http",
'type': "APPLICATION_PROFILE_TYPE_HTTP",
'tenant_ref': "/api/tenant/?name=admin",
'preserve_client_ip': False,
'http_profile': {
'max_rps_uri': 0,
'keepalive_header': False,
'max_rps_cip_uri': 0,
'x_forwarded_proto_enabled': False,
'connection_multiplexing_enabled': True,
'websockets_enabled': True,
'enable_request_body_buffering': False,
'hsts_enabled': False,
'xff_enabled': True,
'disable_keepalive_posts_msie6': True,
'keepalive_timeout': 30000,
'ssl_client_certificate_mode': "SSL_CLIENT_CERTIFICATE_NONE",
'http_to_https': True,
'max_bad_rps_cip_uri': 0,
'client_body_timeout': 30000,
'httponly_enabled': False,
'hsts_max_age': 365,
'max_bad_rps_cip': 0,
'server_side_redirect_to_https': False,
'client_max_header_size': 12,
'client_max_request_size': 48,
'max_rps_unknown_uri': 0,
'post_accept_timeout': 30000,
'client_header_timeout': 10000,
'secure_cookie_enabled': False,
'xff_alternate_name': "X-Forwarded-For",
'max_rps_cip': 0,
'client_max_body_size': 0,
'max_rps_unknown_cip': 0,
'allow_dots_in_header_name': False,
'max_bad_rps_uri': 0,
'use_app_keepalive_timeout': False
},
'dos_rl_profile': {
'rl_profile': {
'client_ip_connections_rate_limit': {
'explicit_tracking': False,
'action': {
'status_code': "HTTP_LOCAL_RESPONSE_STATUS_CODE_429",
'type': "RL_ACTION_NONE"
},
'fine_grain': False
}
},
'dos_profile': {
'thresh_period': 5
}
}
}
def correct_vs_ref(self, avi_config):
global csv_writer_dict_list
avi_graph = self.make_graph(avi_config)
csv_dict_sub = [row for row in csv_writer_dict_list if row[
'Netscaler Command'] not in ('add lb vserver',
'add cs vserver') and row[
'Status'] in (STATUS_PARTIAL,
STATUS_SUCCESSFUL)]
for dict_row in csv_dict_sub:
obj = dict_row['AVI Object']
if isinstance(obj, str) and obj.startswith('{'):
vs = []
if '__/__' in obj:
for dataobj in obj.split('__/__'):
obj = eval(dataobj)
self.add_vs_ref(obj, avi_graph, vs)
else:
obj = eval(obj)
self.add_vs_ref(obj, avi_graph, vs)
if vs:
dict_row['VS Reference'] = str(list(set(vs)))
else:
dict_row['VS Reference'] = STATUS_NOT_IN_USE
def add_vs_ref(self, obj, avi_graph, vs):
obj_name = obj.get('name', obj.get('hostname'))
if obj_name:
if avi_graph.has_node(obj_name):
LOG.debug("Checked predecessor for %s", obj_name)
predecessor = list(avi_graph.predecessors(obj_name))
if predecessor:
self.get_predecessor(predecessor, avi_graph, vs)
else:
LOG.debug("Object %s may be merged or orphaned", obj_name)
def get_predecessor(self, predecessor, avi_graph, vs):
if len(predecessor) > 1:
for node in predecessor:
nodelist = [node]
self.get_predecessor(nodelist, avi_graph, vs)
elif len(predecessor):
node_obj = [nod for nod in list(avi_graph.nodes().data()) if
nod[0] == predecessor[0]]
if node_obj and (node_obj[0][1]['type'] == 'VS' or 'VS' in node_obj[
0][1]['type']):
LOG.debug("Predecessor %s found", predecessor[0])
vs.extend(predecessor)
else:
LOG.debug("Checked predecessor for %s", predecessor[0])
nodelist = list(avi_graph.predecessors(predecessor[0]))
self.get_predecessor(nodelist, avi_graph, vs)
else:
LOG.debug("No more predecessor")
| true
| true
|
790c2dbc7cb02a3723270d55d4a995e8aef41d25
| 26,211
|
py
|
Python
|
tiled/server/core.py
|
martindurant/tiled
|
79eef6fb60964a726c0b43a280c6343b94097640
|
[
"BSD-3-Clause"
] | null | null | null |
tiled/server/core.py
|
martindurant/tiled
|
79eef6fb60964a726c0b43a280c6343b94097640
|
[
"BSD-3-Clause"
] | null | null | null |
tiled/server/core.py
|
martindurant/tiled
|
79eef6fb60964a726c0b43a280c6343b94097640
|
[
"BSD-3-Clause"
] | null | null | null |
import abc
import collections.abc
import contextlib
import dataclasses
import itertools
import math
import operator
import re
import sys
import time
from collections import defaultdict
from datetime import datetime, timedelta
from functools import lru_cache
from hashlib import md5
from typing import Any, Optional
import dateutil.tz
import msgpack
import orjson
import pydantic
from fastapi import Depends, HTTPException, Query, Request, Response
from starlette.responses import JSONResponse, Send, StreamingResponse
# These modules are not directly used, but they register things on import.
from .. import queries
from ..media_type_registration import (
serialization_registry as default_serialization_registry,
)
from ..queries import KeyLookup, QueryValueError
from ..query_registration import query_registry as default_query_registry
from ..trees.in_memory import Tree as TreeInMemory
from ..utils import (
APACHE_ARROW_FILE_MIME_TYPE,
SerializationError,
UnsupportedShape,
modules_available,
)
from . import models
from .authentication import get_current_user
from .etag import tokenize
del queries
if modules_available("numpy", "dask.array"):
from ..structures import array as _array # noqa: F401
del _array
if modules_available("pandas", "pyarrow", "dask.dataframe"):
from ..structures import dataframe as _dataframe # noqa: F401
del _dataframe
if modules_available("xarray"):
from ..structures import xarray as _xarray # noqa: F401
del _xarray
_FILTER_PARAM_PATTERN = re.compile(r"filter___(?P<name>.*)___(?P<field>[^\d\W][\w\d]+)")
_LOCAL_TZINFO = dateutil.tz.gettz()
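# For example, a query parameter named "filter___text___text" parses to
# name="text", field="text":
#
#     _FILTER_PARAM_PATTERN.match("filter___text___text").groups()
#     # -> ('text', 'text')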
@lru_cache(1)
def get_query_registry():
"This may be overridden via dependency_overrides."
return default_query_registry
@lru_cache(1)
def get_serialization_registry():
"This may be overridden via dependency_overrides."
return default_serialization_registry
def get_root_tree():
raise NotImplementedError(
"This should be overridden via dependency_overrides. "
"See tiled.server.app.serve_tree()."
)
def entry(
path: str,
request: Request,
current_user: str = Depends(get_current_user),
root_tree: pydantic.BaseSettings = Depends(get_root_tree),
):
path_parts = [segment for segment in path.split("/") if segment]
entry = root_tree.authenticated_as(current_user)
try:
# Traverse into sub-tree(s).
for segment in path_parts:
try:
with record_timing(request.state.metrics, "acl"):
unauthenticated_entry = entry[segment]
except (KeyError, TypeError):
raise NoEntry(path_parts)
# TODO Update this when Tree has structure_family == "tree".
if not hasattr(unauthenticated_entry, "structure_family"):
with record_timing(request.state.metrics, "acl"):
entry = unauthenticated_entry.authenticated_as(current_user)
else:
entry = unauthenticated_entry
return entry
except NoEntry:
raise HTTPException(status_code=404, detail=f"No such entry: {path_parts}")
def reader(
entry: Any = Depends(entry),
):
"Specify a path parameter and use it to look up a reader."
if not isinstance(entry, DuckReader):
raise HTTPException(status_code=404, detail="This is not a Reader.")
return entry
def block(
# Ellipsis as the "default" tells FastAPI to make this parameter required.
block: str = Query(..., regex="^[0-9]*(,[0-9]+)*$"),
):
"Specify and parse a block index parameter."
if not block:
return ()
return tuple(map(int, block.split(",")))
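# A quick sketch of the parsing, assuming the dependency is called directly
# with an already-validated string (FastAPI applies the regex first):
#
#     block("1,2,3")  # -> (1, 2, 3)
#     block("")       # -> ()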
def expected_shape(
expected_shape: Optional[str] = Query(
None, min_length=1, regex="^[0-9]+(,[0-9]+)*$|^scalar$"
),
):
"Specify and parse an expected_shape parameter."
if expected_shape is None:
return
if expected_shape == "scalar":
return ()
return tuple(map(int, expected_shape.split(",")))
def slice_(
slice: str = Query(None, regex="^[0-9,:]*$"),
):
"Specify and parse a block index parameter."
import numpy
    # IMPORTANT We are eval-ing a user-provided string here so we need to be
# very careful about locking down what can be in it. The regex above
# excludes any letters or operators, so it is not possible to execute
# functions or expensive arithmetic.
return tuple(
[
eval(f"numpy.s_[{dim!s}]", {"numpy": numpy})
for dim in (slice or "").split(",")
if dim
]
)
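# For example, the validated string "0:10,::2,5" yields ordinary numpy
# slicing objects and integers, nothing more:
#
#     slice_("0:10,::2,5")
#     # -> (slice(0, 10, None), slice(None, None, 2), 5)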
def len_or_approx(tree):
"""
Prefer approximate length if implemented. (It's cheaper.)
"""
try:
return operator.length_hint(tree)
except TypeError:
return len(tree)
def pagination_links(route, path_parts, offset, limit, length_hint):
path_str = "/".join(path_parts)
links = {
"self": f"{route}/{path_str}?page[offset]={offset}&page[limit]={limit}",
# These are conditionally overwritten below.
"first": None,
"last": None,
"next": None,
"prev": None,
}
if limit:
last_page = math.floor(length_hint / limit) * limit
links.update(
{
"first": f"{route}/{path_str}?page[offset]={0}&page[limit]={limit}",
"last": f"{route}/{path_str}?page[offset]={last_page}&page[limit]={limit}",
}
)
if offset + limit < length_hint:
links[
"next"
] = f"{route}/{path_str}?page[offset]={offset + limit}&page[limit]={limit}"
if offset > 0:
links[
"prev"
] = f"{route}/{path_str}?page[offset]={max(0, offset - limit)}&page[limit]={limit}"
return links
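# A small worked example (values illustrative):
#
#     pagination_links("/search", ["a", "b"], offset=0, limit=10, length_hint=25)
#     # -> "self" and "first" point at offset 0, "last" at offset 20,
#     #    "next" at offset 10, and "prev" stays None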
class DuckReader(metaclass=abc.ABCMeta):
"""
Used for isinstance(obj, DuckReader):
"""
@classmethod
def __subclasshook__(cls, candidate):
# If the following condition is True, candidate is recognized
# to "quack" like a Reader.
EXPECTED_ATTRS = ("read", "macrostructure", "microstructure")
return all(hasattr(candidate, attr) for attr in EXPECTED_ATTRS)
class DuckTree(metaclass=abc.ABCMeta):
"""
Used for isinstance(obj, DuckTree):
"""
@classmethod
def __subclasshook__(cls, candidate):
# If the following condition is True, candidate is recognized
# to "quack" like a Tree.
EXPECTED_ATTRS = ("__getitem__", "__iter__")
return all(hasattr(candidate, attr) for attr in EXPECTED_ATTRS)
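# A minimal sketch of the duck typing in action (class name illustrative):
#
#     class FakeTree:
#         def __getitem__(self, key): ...
#         def __iter__(self): ...
#
#     isinstance(FakeTree(), DuckTree)  # -> True, no registration required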
def construct_entries_response(
query_registry, tree, route, path, offset, limit, fields, filters, sort, base_url
):
path_parts = [segment for segment in path.split("/") if segment]
if not isinstance(tree, DuckTree):
raise WrongTypeForRoute("This is not a Tree.")
queries = defaultdict(
dict
) # e.g. {"text": {"text": "dog"}, "lookup": {"key": "..."}}
# Group the parameters by query type.
for key, value in filters.items():
if value is None:
continue
name, field = _FILTER_PARAM_PATTERN.match(key).groups()
queries[name][field] = value
sorting = []
if sort is not None:
for item in sort.split(","):
if item:
if item.startswith("-"):
sorting.append((item[1:], -1))
else:
sorting.append((item, 1))
if sorting:
if not hasattr(tree, "sort"):
raise HTTPException(
status_code=400, detail="This Tree does not support sorting."
)
tree = tree.sort(sorting)
# Apply the queries and obtain a narrowed tree.
key_lookups = []
for query_name, parameters_dict_of_lists in queries.items():
for i in itertools.count(0):
try:
parameters = {
field_name: parameters_list[i]
for field_name, parameters_list in parameters_dict_of_lists.items()
}
except IndexError:
break
query_class = query_registry.name_to_query_type[query_name]
# Special case:
# List fields are serialized as comma-separated strings.
for field in dataclasses.fields(query_class):
if getattr(field.type, "__origin__", None) is list:
(inner_type,) = field.type.__args__
parameters[field.name] = [
inner_type(item) for item in parameters[field.name].split(",")
]
try:
query = query_class(**parameters)
# Special case: Do key-lookups at the end after all other filtering.
# We do not require trees to implement this query; we implement it
# directly here by just calling __getitem__.
if isinstance(query, KeyLookup):
key_lookups.append(query.key)
continue
tree = tree.search(query)
except QueryValueError as err:
raise HTTPException(status_code=400, detail=err.args[0])
if key_lookups:
# Duplicates are technically legal because *any* query can be given
# with multiple parameters.
unique_key_lookups = set(key_lookups)
(key_lookup), *others = unique_key_lookups
if others:
# Two non-equal KeyLookup queries must return no results.
tree = TreeInMemory({})
else:
try:
tree = TreeInMemory(
{key_lookup: tree[key_lookup]}, must_revalidate=False
)
except KeyError:
tree = TreeInMemory({})
count = len_or_approx(tree)
links = pagination_links(route, path_parts, offset, limit, count)
data = []
if fields != [models.EntryFields.none]:
# Pull a page of items into memory.
items = tree.items_indexer[offset : offset + limit] # noqa: E203
else:
# Pull a page of just the keys, which is cheaper.
items = (
(key, None)
for key in tree.keys_indexer[offset : offset + limit] # noqa: E203
)
    # This value will not leak out. It is just used to seed comparisons.
metadata_stale_at = datetime.utcnow() + timedelta(days=1_000_000)
must_revalidate = getattr(tree, "must_revalidate", True)
for key, entry in items:
resource = construct_resource(base_url, path_parts + [key], entry, fields)
data.append(resource)
        # If any entry has entry.metadata_stale_at = None, then there will
# be no 'Expires' header. We will pessimistically assume the values
# are immediately stale.
if metadata_stale_at is not None:
if getattr(entry, "metadata_stale_at", None) is None:
metadata_stale_at = None
else:
metadata_stale_at = min(metadata_stale_at, entry.metadata_stale_at)
return (
models.Response(data=data, links=links, meta={"count": count}),
metadata_stale_at,
must_revalidate,
)
DEFAULT_MEDIA_TYPES = {
"array": "application/octet-stream",
"dataframe": APACHE_ARROW_FILE_MIME_TYPE,
"structured_array_tabular": "application/octet-stream",
"structured_array_generic": "application/octet-stream",
"variable": "application/octet-stream",
"data_array": "application/octet-stream",
"dataset": "application/netcdf",
}
def construct_data_response(
structure_family,
serialization_registry,
payload,
metadata,
request,
format=None,
specs=None,
expires=None,
):
request.state.endpoint = "data"
if specs is None:
specs = []
default_media_type = DEFAULT_MEDIA_TYPES[structure_family]
# Give priority to the `format` query parameter. Otherwise, consult Accept
# header.
if format is not None:
media_types_or_aliases = format.split(",")
# Resolve aliases, like "csv" -> "text/csv".
media_types = [
serialization_registry.resolve_alias(t) for t in media_types_or_aliases
]
else:
# The HTTP spec says these should be separated by ", " but some
# browsers separate with just "," (no space).
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation/List_of_default_Accept_values#default_values # noqa
# That variation is what we are handling below with lstrip.
media_types = [
s.lstrip(" ")
for s in request.headers.get("Accept", default_media_type).split(",")
]
# The client may give us a choice of media types. Find the first one
# that we support.
supported = set()
for media_type in media_types:
if media_type == "*/*":
media_type = default_media_type
        # Fall back to the structure_family's generic serializer if no spec matches.
for spec in specs + [structure_family]:
media_types_for_spec = serialization_registry.media_types(spec)
if media_type in media_types_for_spec:
break
supported.update(media_types_for_spec)
else:
# None of the specs or the structure_family can serialize to this
# media_type. Try the next one.
continue
# We found a match above. We have our media_type.
break
else:
# We have checked each of the media_types, and we cannot serialize
# to any of them.
raise UnsupportedMediaTypes(
f"None of the media types requested by the client are supported. "
f"Supported: {', '.join(supported)}. Requested: {', '.join(media_types)}.",
)
with record_timing(request.state.metrics, "tok"):
# Create an ETag that uniquely identifies this content and the media
# type that it will be encoded as.
etag = tokenize((payload, media_type))
headers = {"ETag": etag}
if expires is not None:
headers["Expires"] = expires.strftime(HTTP_EXPIRES_HEADER_FORMAT)
if request.headers.get("If-None-Match", "") == etag:
# If the client already has this content, confirm that.
return Response(status_code=304, headers=headers)
# This is the expensive step: actually serialize.
try:
content = serialization_registry(
structure_family, media_type, payload, metadata
)
except UnsupportedShape as err:
raise UnsupportedMediaTypes(
f"The shape of this data {err.args[0]} is incompatible with the requested format ({media_type}). "
f"Slice it or choose a different format.",
)
except SerializationError:
raise UnsupportedMediaTypes(
"This type is supported in general but there was an unknown error packing this specific data.",
)
return PatchedResponse(
content=content,
media_type=media_type,
headers=headers,
)
def construct_resource(base_url, path_parts, entry, fields):
path_str = "/".join(path_parts)
attributes = {}
if models.EntryFields.metadata in fields:
attributes["metadata"] = entry.metadata
if models.EntryFields.specs in fields:
attributes["specs"] = getattr(entry, "specs", None)
if isinstance(entry, DuckTree):
if models.EntryFields.count in fields:
attributes["count"] = len_or_approx(entry)
if hasattr(entry, "sorting"):
attributes["sorting"] = entry.sorting
resource = models.TreeResource(
**{
"id": path_parts[-1] if path_parts else "",
"attributes": models.TreeAttributes(**attributes),
"type": models.EntryType.tree,
"links": {
"self": f"{base_url}metadata/{path_str}",
"search": f"{base_url}search/{path_str}",
},
}
)
else:
links = {"self": f"{base_url}metadata/{path_str}"}
structure = {}
if entry is not None:
# entry is None when we are pulling just *keys* from the
# Tree and not values.
links.update(
{
link: template.format(base_url=base_url, path=path_str)
for link, template in FULL_LINKS[entry.structure_family].items()
}
)
if models.EntryFields.structure_family in fields:
attributes["structure_family"] = entry.structure_family
if models.EntryFields.macrostructure in fields:
macrostructure = entry.macrostructure()
if macrostructure is not None:
structure["macro"] = dataclasses.asdict(macrostructure)
if models.EntryFields.microstructure in fields:
if entry.structure_family == "dataframe":
                # Special case: its microstructure cannot be JSON-serialized
# and is therefore available from separate routes. Sends links
# instead of the actual payload.
structure["micro"] = {
"links": {
"meta": f"{base_url}dataframe/meta/{path_str}",
"divisions": f"{base_url}dataframe/divisions/{path_str}",
}
}
else:
microstructure = entry.microstructure()
if microstructure is not None:
structure["micro"] = dataclasses.asdict(microstructure)
if entry.structure_family == "array":
block_template = ",".join(
f"{{index_{index}}}"
for index in range(len(structure["macro"]["shape"]))
)
links[
"block"
] = f"{base_url}array/block/{path_str}?block={block_template}"
elif entry.structure_family == "dataframe":
links[
"partition"
] = f"{base_url}dataframe/partition/{path_str}?partition={{index}}"
elif entry.structure_family == "variable":
block_template = ",".join(
f"{{index_{index}}}"
for index in range(
len(structure["macro"]["data"]["macro"]["shape"])
)
)
links[
"block"
] = f"{base_url}variable/block/{path_str}?block={block_template}"
elif entry.structure_family == "data_array":
block_template = ",".join(
f"{{index_{index}}}"
for index in range(
len(structure["macro"]["variable"]["macro"]["data"])
)
)
links[
"block"
] = f"{base_url}data_array/block/{path_str}?block={block_template}"
elif entry.structure_family == "dataset":
links[
"block"
] = f"{base_url}dataset/block/{path_str}?variable={{variable}}&block={{block_indexes}}"
microstructure = entry.microstructure()
attributes["structure"] = structure
resource = models.ReaderResource(
**{
"id": path_parts[-1],
"attributes": models.ReaderAttributes(**attributes),
"type": models.EntryType.reader,
"links": links,
}
)
return resource
class PatchedResponse(Response):
"Patch the render method to accept memoryview."
def render(self, content: Any) -> bytes:
if isinstance(content, memoryview):
return content.cast("B")
return super().render(content)
class PatchedStreamingResponse(StreamingResponse):
"Patch the stream_response method to accept memoryview."
async def stream_response(self, send: Send) -> None:
await send(
{
"type": "http.response.start",
"status": self.status_code,
"headers": self.raw_headers,
}
)
async for chunk in self.body_iterator:
# BEGIN ALTERATION
if not isinstance(chunk, (bytes, memoryview)):
# END ALTERATION
chunk = chunk.encode(self.charset)
await send({"type": "http.response.body", "body": chunk, "more_body": True})
await send({"type": "http.response.body", "body": b"", "more_body": False})
class NumpySafeJSONResponse(JSONResponse):
def __init__(self, *args, metrics, **kwargs):
self.__metrics = metrics
super().__init__(*args, **kwargs)
def render(self, content: Any) -> bytes:
with record_timing(self.__metrics, "pack"):
return orjson.dumps(content, option=orjson.OPT_SERIALIZE_NUMPY)
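    # For example (illustrative payload), orjson serializes numpy natively:
    #
    #     orjson.dumps({"x": numpy.arange(3)}, option=orjson.OPT_SERIALIZE_NUMPY)
    #     # -> b'{"x":[0,1,2]}'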
def _numpy_safe_msgpack_encoder(obj):
# If numpy has not been imported yet, then we can be sure that obj
# is not a numpy object, and we want to avoid triggering a numpy
# import. (The server does not have a hard numpy dependency.)
if "numpy" in sys.modules:
import numpy
if isinstance(obj, (numpy.generic, numpy.ndarray)):
if numpy.isscalar(obj):
return obj.item()
return obj.tolist()
return obj
def _patch_naive_datetimes(obj):
"""
If a naive datetime is found, attach local time.
Msgpack can only serialize datetimes with tzinfo.
"""
if hasattr(obj, "items"):
patched_obj = {}
for k, v in obj.items():
patched_obj[k] = _patch_naive_datetimes(v)
elif (not isinstance(obj, str)) and isinstance(obj, collections.abc.Iterable):
patched_obj = []
for item in obj:
patched_obj.append(_patch_naive_datetimes(item))
elif isinstance(obj, datetime) and obj.tzinfo is None:
patched_obj = obj.astimezone(_LOCAL_TZINFO)
else:
patched_obj = obj
return patched_obj
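# For example (the attached tzinfo depends on the host machine):
#
#     _patch_naive_datetimes({"t": [datetime(2021, 1, 1, 12, 0)]})
#     # -> {"t": [datetime(2021, 1, 1, 12, 0, tzinfo=<local timezone>)]}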
class MsgpackResponse(Response):
media_type = "application/x-msgpack"
def __init__(self, *args, metrics, **kwargs):
self.__metrics = metrics
super().__init__(*args, **kwargs)
def render(self, content: Any, _reentered=False) -> bytes:
try:
with record_timing(self.__metrics, "pack"):
return msgpack.packb(
content, default=_numpy_safe_msgpack_encoder, datetime=True
)
except TypeError as err:
# msgpack tries to handle all datetimes, but if it
# received a naive one (tzinfo=None) then it fails.
# We cannot use the default hook to handle this because
# it is not called.
if err.args == ("can not serialize 'datetime.datetime' object",) and (
not _reentered
):
patched_content = _patch_naive_datetimes(content)
return self.render(patched_content, _reentered=True)
raise
JSON_MIME_TYPE = "application/json"
MSGPACK_MIME_TYPE = "application/x-msgpack"
# This is a silly time format, but it is the HTTP standard.
HTTP_EXPIRES_HEADER_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
def json_or_msgpack(request, content, expires=None, headers=None):
media_types = request.headers.get("Accept", JSON_MIME_TYPE).split(", ")
for media_type in media_types:
if media_type == "*/*":
media_type = JSON_MIME_TYPE
break
if media_type == MSGPACK_MIME_TYPE:
break
if media_type == JSON_MIME_TYPE:
break
else:
        # It is common in HTTP to fall back on a default representation if
# none of the requested ones are available. We do not do this for
# data payloads, but it makes some sense to do it for these metadata
# messages.
media_type = JSON_MIME_TYPE
assert media_type in {JSON_MIME_TYPE, MSGPACK_MIME_TYPE}
content_as_dict = content.dict()
with record_timing(request.state.metrics, "tok"):
etag = md5(str(content_as_dict).encode()).hexdigest()
headers = headers or {}
headers["ETag"] = etag
if expires is not None:
headers["Expires"] = expires.strftime(HTTP_EXPIRES_HEADER_FORMAT)
if request.headers.get("If-None-Match", "") == etag:
# If the client already has this content, confirm that.
return Response(status_code=304, headers=headers)
if media_type == "application/x-msgpack":
return MsgpackResponse(
content_as_dict, headers=headers, metrics=request.state.metrics
)
return NumpySafeJSONResponse(
content_as_dict, headers=headers, metrics=request.state.metrics
)
class UnsupportedMediaTypes(Exception):
pass
class NoEntry(KeyError):
pass
class WrongTypeForRoute(Exception):
pass
FULL_LINKS = {
"array": {"full": "{base_url}array/full/{path}"},
"structured_array_generic": {
"full": "{base_url}structured_array_generic/full/{path}"
},
"structured_array_tabular": {
"full": "{base_url}structured_array_tabular/full/{path}"
},
"dataframe": {"full": "{base_url}dataframe/full/{path}"},
"variable": {"full": "{base_url}variable/full/{path}"},
"data_array": {"full_variable": "{base_url}data_array/variable/full/{path}"},
"dataset": {
"full_variable": "{base_url}dataset/data_var/full/{path}?variable={{variable}}",
"full_coordinate": "{base_url}dataset/coord/full/{path}?variable={{variable}}",
"full_dataset": "{base_url}dataset/full/{path}",
},
}
@contextlib.contextmanager
def record_timing(metrics, key):
"""
    Accumulate the run time (in seconds) of the context body into metrics[key]["dur"].
"""
t0 = time.perf_counter()
yield
metrics[key]["dur"] += time.perf_counter() - t0 # Units: seconds
| 36.556485
| 132
| 0.602877
|
import abc
import collections.abc
import contextlib
import dataclasses
import itertools
import math
import operator
import re
import sys
import time
from collections import defaultdict
from datetime import datetime, timedelta
from functools import lru_cache
from hashlib import md5
from typing import Any, Optional
import dateutil.tz
import msgpack
import orjson
import pydantic
from fastapi import Depends, HTTPException, Query, Request, Response
from starlette.responses import JSONResponse, Send, StreamingResponse
from .. import queries
from ..media_type_registration import (
serialization_registry as default_serialization_registry,
)
from ..queries import KeyLookup, QueryValueError
from ..query_registration import query_registry as default_query_registry
from ..trees.in_memory import Tree as TreeInMemory
from ..utils import (
APACHE_ARROW_FILE_MIME_TYPE,
SerializationError,
UnsupportedShape,
modules_available,
)
from . import models
from .authentication import get_current_user
from .etag import tokenize
del queries
if modules_available("numpy", "dask.array"):
from ..structures import array as _array
del _array
if modules_available("pandas", "pyarrow", "dask.dataframe"):
from ..structures import dataframe as _dataframe
del _dataframe
if modules_available("xarray"):
from ..structures import xarray as _xarray
del _xarray
_FILTER_PARAM_PATTERN = re.compile(r"filter___(?P<name>.*)___(?P<field>[^\d\W][\w\d]+)")
_LOCAL_TZINFO = dateutil.tz.gettz()
@lru_cache(1)
def get_query_registry():
return default_query_registry
@lru_cache(1)
def get_serialization_registry():
return default_serialization_registry
def get_root_tree():
raise NotImplementedError(
"This should be overridden via dependency_overrides. "
"See tiled.server.app.serve_tree()."
)
def entry(
path: str,
request: Request,
current_user: str = Depends(get_current_user),
root_tree: pydantic.BaseSettings = Depends(get_root_tree),
):
path_parts = [segment for segment in path.split("/") if segment]
entry = root_tree.authenticated_as(current_user)
try:
for segment in path_parts:
try:
with record_timing(request.state.metrics, "acl"):
unauthenticated_entry = entry[segment]
except (KeyError, TypeError):
raise NoEntry(path_parts)
if not hasattr(unauthenticated_entry, "structure_family"):
with record_timing(request.state.metrics, "acl"):
entry = unauthenticated_entry.authenticated_as(current_user)
else:
entry = unauthenticated_entry
return entry
except NoEntry:
raise HTTPException(status_code=404, detail=f"No such entry: {path_parts}")
def reader(
entry: Any = Depends(entry),
):
if not isinstance(entry, DuckReader):
raise HTTPException(status_code=404, detail="This is not a Reader.")
return entry
def block(
block: str = Query(..., regex="^[0-9]*(,[0-9]+)*$"),
):
if not block:
return ()
return tuple(map(int, block.split(",")))
def expected_shape(
expected_shape: Optional[str] = Query(
None, min_length=1, regex="^[0-9]+(,[0-9]+)*$|^scalar$"
),
):
if expected_shape is None:
return
if expected_shape == "scalar":
return ()
return tuple(map(int, expected_shape.split(",")))
def slice_(
slice: str = Query(None, regex="^[0-9,:]*$"),
):
import numpy
return tuple(
[
eval(f"numpy.s_[{dim!s}]", {"numpy": numpy})
for dim in (slice or "").split(",")
if dim
]
)
def len_or_approx(tree):
try:
return operator.length_hint(tree)
except TypeError:
return len(tree)
def pagination_links(route, path_parts, offset, limit, length_hint):
path_str = "/".join(path_parts)
links = {
"self": f"{route}/{path_str}?page[offset]={offset}&page[limit]={limit}",
"first": None,
"last": None,
"next": None,
"prev": None,
}
if limit:
last_page = math.floor(length_hint / limit) * limit
links.update(
{
"first": f"{route}/{path_str}?page[offset]={0}&page[limit]={limit}",
"last": f"{route}/{path_str}?page[offset]={last_page}&page[limit]={limit}",
}
)
if offset + limit < length_hint:
links[
"next"
] = f"{route}/{path_str}?page[offset]={offset + limit}&page[limit]={limit}"
if offset > 0:
links[
"prev"
] = f"{route}/{path_str}?page[offset]={max(0, offset - limit)}&page[limit]={limit}"
return links
class DuckReader(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, candidate):
EXPECTED_ATTRS = ("read", "macrostructure", "microstructure")
return all(hasattr(candidate, attr) for attr in EXPECTED_ATTRS)
class DuckTree(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, candidate):
EXPECTED_ATTRS = ("__getitem__", "__iter__")
return all(hasattr(candidate, attr) for attr in EXPECTED_ATTRS)
def construct_entries_response(
query_registry, tree, route, path, offset, limit, fields, filters, sort, base_url
):
path_parts = [segment for segment in path.split("/") if segment]
if not isinstance(tree, DuckTree):
raise WrongTypeForRoute("This is not a Tree.")
queries = defaultdict(
dict
)
for key, value in filters.items():
if value is None:
continue
name, field = _FILTER_PARAM_PATTERN.match(key).groups()
queries[name][field] = value
sorting = []
if sort is not None:
for item in sort.split(","):
if item:
if item.startswith("-"):
sorting.append((item[1:], -1))
else:
sorting.append((item, 1))
if sorting:
if not hasattr(tree, "sort"):
raise HTTPException(
status_code=400, detail="This Tree does not support sorting."
)
tree = tree.sort(sorting)
key_lookups = []
for query_name, parameters_dict_of_lists in queries.items():
for i in itertools.count(0):
try:
parameters = {
field_name: parameters_list[i]
for field_name, parameters_list in parameters_dict_of_lists.items()
}
except IndexError:
break
query_class = query_registry.name_to_query_type[query_name]
for field in dataclasses.fields(query_class):
if getattr(field.type, "__origin__", None) is list:
(inner_type,) = field.type.__args__
parameters[field.name] = [
inner_type(item) for item in parameters[field.name].split(",")
]
try:
query = query_class(**parameters)
if isinstance(query, KeyLookup):
key_lookups.append(query.key)
continue
tree = tree.search(query)
except QueryValueError as err:
raise HTTPException(status_code=400, detail=err.args[0])
if key_lookups:
unique_key_lookups = set(key_lookups)
(key_lookup), *others = unique_key_lookups
if others:
tree = TreeInMemory({})
else:
try:
tree = TreeInMemory(
{key_lookup: tree[key_lookup]}, must_revalidate=False
)
except KeyError:
tree = TreeInMemory({})
count = len_or_approx(tree)
links = pagination_links(route, path_parts, offset, limit, count)
data = []
if fields != [models.EntryFields.none]:
items = tree.items_indexer[offset : offset + limit]
else:
items = (
(key, None)
for key in tree.keys_indexer[offset : offset + limit]
)
metadata_stale_at = datetime.utcnow() + timedelta(days=1_000_000)
must_revalidate = getattr(tree, "must_revalidate", True)
for key, entry in items:
resource = construct_resource(base_url, path_parts + [key], entry, fields)
data.append(resource)
if metadata_stale_at is not None:
if getattr(entry, "metadata_stale_at", None) is None:
metadata_stale_at = None
else:
metadata_stale_at = min(metadata_stale_at, entry.metadata_stale_at)
return (
models.Response(data=data, links=links, meta={"count": count}),
metadata_stale_at,
must_revalidate,
)
DEFAULT_MEDIA_TYPES = {
"array": "application/octet-stream",
"dataframe": APACHE_ARROW_FILE_MIME_TYPE,
"structured_array_tabular": "application/octet-stream",
"structured_array_generic": "application/octet-stream",
"variable": "application/octet-stream",
"data_array": "application/octet-stream",
"dataset": "application/netcdf",
}
def construct_data_response(
structure_family,
serialization_registry,
payload,
metadata,
request,
format=None,
specs=None,
expires=None,
):
request.state.endpoint = "data"
if specs is None:
specs = []
default_media_type = DEFAULT_MEDIA_TYPES[structure_family]
if format is not None:
media_types_or_aliases = format.split(",")
media_types = [
serialization_registry.resolve_alias(t) for t in media_types_or_aliases
]
else:
        media_types = [
s.lstrip(" ")
for s in request.headers.get("Accept", default_media_type).split(",")
]
supported = set()
for media_type in media_types:
if media_type == "*/*":
media_type = default_media_type
for spec in specs + [structure_family]:
media_types_for_spec = serialization_registry.media_types(spec)
if media_type in media_types_for_spec:
break
supported.update(media_types_for_spec)
else:
continue
break
else:
raise UnsupportedMediaTypes(
f"None of the media types requested by the client are supported. "
f"Supported: {', '.join(supported)}. Requested: {', '.join(media_types)}.",
)
with record_timing(request.state.metrics, "tok"):
etag = tokenize((payload, media_type))
headers = {"ETag": etag}
if expires is not None:
headers["Expires"] = expires.strftime(HTTP_EXPIRES_HEADER_FORMAT)
if request.headers.get("If-None-Match", "") == etag:
return Response(status_code=304, headers=headers)
try:
content = serialization_registry(
structure_family, media_type, payload, metadata
)
except UnsupportedShape as err:
raise UnsupportedMediaTypes(
f"The shape of this data {err.args[0]} is incompatible with the requested format ({media_type}). "
f"Slice it or choose a different format.",
)
except SerializationError:
raise UnsupportedMediaTypes(
"This type is supported in general but there was an unknown error packing this specific data.",
)
return PatchedResponse(
content=content,
media_type=media_type,
headers=headers,
)
def construct_resource(base_url, path_parts, entry, fields):
path_str = "/".join(path_parts)
attributes = {}
if models.EntryFields.metadata in fields:
attributes["metadata"] = entry.metadata
if models.EntryFields.specs in fields:
attributes["specs"] = getattr(entry, "specs", None)
if isinstance(entry, DuckTree):
if models.EntryFields.count in fields:
attributes["count"] = len_or_approx(entry)
if hasattr(entry, "sorting"):
attributes["sorting"] = entry.sorting
resource = models.TreeResource(
**{
"id": path_parts[-1] if path_parts else "",
"attributes": models.TreeAttributes(**attributes),
"type": models.EntryType.tree,
"links": {
"self": f"{base_url}metadata/{path_str}",
"search": f"{base_url}search/{path_str}",
},
}
)
else:
links = {"self": f"{base_url}metadata/{path_str}"}
structure = {}
if entry is not None:
links.update(
{
link: template.format(base_url=base_url, path=path_str)
for link, template in FULL_LINKS[entry.structure_family].items()
}
)
if models.EntryFields.structure_family in fields:
attributes["structure_family"] = entry.structure_family
if models.EntryFields.macrostructure in fields:
macrostructure = entry.macrostructure()
if macrostructure is not None:
structure["macro"] = dataclasses.asdict(macrostructure)
if models.EntryFields.microstructure in fields:
if entry.structure_family == "dataframe":
structure["micro"] = {
"links": {
"meta": f"{base_url}dataframe/meta/{path_str}",
"divisions": f"{base_url}dataframe/divisions/{path_str}",
}
}
else:
microstructure = entry.microstructure()
if microstructure is not None:
structure["micro"] = dataclasses.asdict(microstructure)
if entry.structure_family == "array":
block_template = ",".join(
f"{{index_{index}}}"
for index in range(len(structure["macro"]["shape"]))
)
links[
"block"
] = f"{base_url}array/block/{path_str}?block={block_template}"
elif entry.structure_family == "dataframe":
links[
"partition"
] = f"{base_url}dataframe/partition/{path_str}?partition={{index}}"
elif entry.structure_family == "variable":
block_template = ",".join(
f"{{index_{index}}}"
for index in range(
len(structure["macro"]["data"]["macro"]["shape"])
)
)
links[
"block"
] = f"{base_url}variable/block/{path_str}?block={block_template}"
elif entry.structure_family == "data_array":
block_template = ",".join(
f"{{index_{index}}}"
for index in range(
len(structure["macro"]["variable"]["macro"]["data"])
)
)
links[
"block"
] = f"{base_url}data_array/block/{path_str}?block={block_template}"
elif entry.structure_family == "dataset":
links[
"block"
] = f"{base_url}dataset/block/{path_str}?variable={{variable}}&block={{block_indexes}}"
microstructure = entry.microstructure()
attributes["structure"] = structure
resource = models.ReaderResource(
**{
"id": path_parts[-1],
"attributes": models.ReaderAttributes(**attributes),
"type": models.EntryType.reader,
"links": links,
}
)
return resource
class PatchedResponse(Response):
def render(self, content: Any) -> bytes:
if isinstance(content, memoryview):
return content.cast("B")
return super().render(content)
class PatchedStreamingResponse(StreamingResponse):
async def stream_response(self, send: Send) -> None:
await send(
{
"type": "http.response.start",
"status": self.status_code,
"headers": self.raw_headers,
}
)
async for chunk in self.body_iterator:
if not isinstance(chunk, (bytes, memoryview)):
chunk = chunk.encode(self.charset)
await send({"type": "http.response.body", "body": chunk, "more_body": True})
await send({"type": "http.response.body", "body": b"", "more_body": False})
class NumpySafeJSONResponse(JSONResponse):
def __init__(self, *args, metrics, **kwargs):
self.__metrics = metrics
super().__init__(*args, **kwargs)
def render(self, content: Any) -> bytes:
with record_timing(self.__metrics, "pack"):
return orjson.dumps(content, option=orjson.OPT_SERIALIZE_NUMPY)
def _numpy_safe_msgpack_encoder(obj):
if "numpy" in sys.modules:
import numpy
if isinstance(obj, (numpy.generic, numpy.ndarray)):
if numpy.isscalar(obj):
return obj.item()
return obj.tolist()
return obj
def _patch_naive_datetimes(obj):
if hasattr(obj, "items"):
patched_obj = {}
for k, v in obj.items():
patched_obj[k] = _patch_naive_datetimes(v)
elif (not isinstance(obj, str)) and isinstance(obj, collections.abc.Iterable):
patched_obj = []
for item in obj:
patched_obj.append(_patch_naive_datetimes(item))
elif isinstance(obj, datetime) and obj.tzinfo is None:
patched_obj = obj.astimezone(_LOCAL_TZINFO)
else:
patched_obj = obj
return patched_obj
class MsgpackResponse(Response):
media_type = "application/x-msgpack"
def __init__(self, *args, metrics, **kwargs):
self.__metrics = metrics
super().__init__(*args, **kwargs)
def render(self, content: Any, _reentered=False) -> bytes:
try:
with record_timing(self.__metrics, "pack"):
return msgpack.packb(
content, default=_numpy_safe_msgpack_encoder, datetime=True
)
except TypeError as err:
if err.args == ("can not serialize 'datetime.datetime' object",) and (
not _reentered
):
patched_content = _patch_naive_datetimes(content)
return self.render(patched_content, _reentered=True)
raise
JSON_MIME_TYPE = "application/json"
MSGPACK_MIME_TYPE = "application/x-msgpack"
HTTP_EXPIRES_HEADER_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
def json_or_msgpack(request, content, expires=None, headers=None):
media_types = request.headers.get("Accept", JSON_MIME_TYPE).split(", ")
for media_type in media_types:
if media_type == "*/*":
media_type = JSON_MIME_TYPE
break
if media_type == MSGPACK_MIME_TYPE:
break
if media_type == JSON_MIME_TYPE:
break
else:
media_type = JSON_MIME_TYPE
assert media_type in {JSON_MIME_TYPE, MSGPACK_MIME_TYPE}
content_as_dict = content.dict()
with record_timing(request.state.metrics, "tok"):
etag = md5(str(content_as_dict).encode()).hexdigest()
headers = headers or {}
headers["ETag"] = etag
if expires is not None:
headers["Expires"] = expires.strftime(HTTP_EXPIRES_HEADER_FORMAT)
if request.headers.get("If-None-Match", "") == etag:
return Response(status_code=304, headers=headers)
if media_type == "application/x-msgpack":
return MsgpackResponse(
content_as_dict, headers=headers, metrics=request.state.metrics
)
return NumpySafeJSONResponse(
content_as_dict, headers=headers, metrics=request.state.metrics
)
class UnsupportedMediaTypes(Exception):
pass
class NoEntry(KeyError):
pass
class WrongTypeForRoute(Exception):
pass
FULL_LINKS = {
"array": {"full": "{base_url}array/full/{path}"},
"structured_array_generic": {
"full": "{base_url}structured_array_generic/full/{path}"
},
"structured_array_tabular": {
"full": "{base_url}structured_array_tabular/full/{path}"
},
"dataframe": {"full": "{base_url}dataframe/full/{path}"},
"variable": {"full": "{base_url}variable/full/{path}"},
"data_array": {"full_variable": "{base_url}data_array/variable/full/{path}"},
"dataset": {
"full_variable": "{base_url}dataset/data_var/full/{path}?variable={{variable}}",
"full_coordinate": "{base_url}dataset/coord/full/{path}?variable={{variable}}",
"full_dataset": "{base_url}dataset/full/{path}",
},
}
@contextlib.contextmanager
def record_timing(metrics, key):
t0 = time.perf_counter()
yield
metrics[key]["dur"] += time.perf_counter() - t0
| true
| true
|
790c2fa177c9b097d62a35b7ab947d349f89641a
| 19
|
py
|
Python
|
app/PoziConnect/version.py
|
pozi/PoziConnect
|
375ca0f5ab3a44cc898b85a12ba54ab1cf0f4f61
|
[
"MIT"
] | null | null | null |
app/PoziConnect/version.py
|
pozi/PoziConnect
|
375ca0f5ab3a44cc898b85a12ba54ab1cf0f4f61
|
[
"MIT"
] | null | null | null |
app/PoziConnect/version.py
|
pozi/PoziConnect
|
375ca0f5ab3a44cc898b85a12ba54ab1cf0f4f61
|
[
"MIT"
] | null | null | null |
version = '2.9.0'
| 9.5
| 18
| 0.526316
|
version = '2.9.0'
| true
| true
|
790c2ff2c2b998e32008151289a2b5c36e39c393
| 16,292
|
py
|
Python
|
tutorials/autotvm/tune_relay_mobile_gpu.py
|
Exhorder6/tvm
|
7e3f068373937c0ae08d58f67b84030a027db1c9
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 90
|
2019-01-26T00:38:49.000Z
|
2022-03-11T23:12:34.000Z
|
tutorials/autotvm/tune_relay_mobile_gpu.py
|
Exhorder6/tvm
|
7e3f068373937c0ae08d58f67b84030a027db1c9
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 91
|
2019-02-27T00:17:01.000Z
|
2022-02-21T18:08:21.000Z
|
tutorials/autotvm/tune_relay_mobile_gpu.py
|
Exhorder6/tvm
|
7e3f068373937c0ae08d58f67b84030a027db1c9
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 41
|
2019-01-28T14:37:03.000Z
|
2022-03-31T03:58:57.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-tuning a Convolutional Network for Mobile GPU
==================================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Eddie Yan <https://github.com/eqy>`_
Auto-tuning for a specific device is critical for getting the best
performance. This is a tutorial about how to tune a whole convolutional
network.
The operator implementation for Mobile GPU in TVM is written in template form.
The template has many tunable knobs (tile factor, vectorization, unrolling, etc).
We will tune all convolution, depthwise convolution and dense operators
in the neural network. After tuning, we produce a log file which stores
the best knob values for all required operators. When the TVM compiler compiles
these operators, it will query this log file to get the best knob values.
We also released pre-tuned parameters for some arm devices. You can go to
`Mobile GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#mobile-gpu>`_
to see the results.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado cloudpickle
#
# To make TVM run faster during tuning, it is recommended to use cython
# as FFI of tvm. In the root directory of tvm, execute
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import os
import numpy as np
import tvm
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.utils import tempdir
import tvm.contrib.graph_executor as runtime
#################################################################
# Define network
# --------------
# First we need to define the network in relay frontend API.
# We can load some pre-defined network from :code:`relay.testing`.
# We can also load models from MXNet, ONNX and TensorFlow.
def get_network(name, batch_size):
"""Get the symbol definition and random weight of a network"""
input_shape = (batch_size, 3, 224, 224)
output_shape = (batch_size, 1000)
if "resnet" in name:
n_layer = int(name.split("-")[1])
mod, params = relay.testing.resnet.get_workload(
num_layers=n_layer, batch_size=batch_size, dtype=dtype
)
elif "vgg" in name:
n_layer = int(name.split("-")[1])
mod, params = relay.testing.vgg.get_workload(
num_layers=n_layer, batch_size=batch_size, dtype=dtype
)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "squeezenet_v1.1":
mod, params = relay.testing.squeezenet.get_workload(
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
        # an example of loading an MXNet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model("resnet18_v1", pretrained=True)
mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype)
net = mod["main"]
net = relay.Function(
net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs
)
mod = tvm.IRModule.from_expr(net)
else:
raise ValueError("Unsupported network: " + name)
return mod, params, input_shape, output_shape
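#################################################################
# For example, the following call (illustrative; it relies on the global
# :code:`dtype` defined below in the tuning options) builds ResNet-18:
#
# .. code-block:: python
#
#   mod, params, input_shape, output_shape = get_network("resnet-18", 1)
#   # input_shape == (1, 3, 224, 224), output_shape == (1, 1000)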
#################################################################
# .. _tutorials-autotvm-start-rpc-tracker:
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses RPC session to communicate with ARM boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 phones, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register Devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build the TVM runtime for the ARM devices.
#
# * For Linux:
# Follow this section :ref:`build-tvm-runtime-on-device` to build
# the TVM runtime on the device. Then register the device to tracker by
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# * For Android:
# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to
# install TVM RPC APK on the android device. Make sure you can pass the android RPC test.
#     Then you have already registered your device. During tuning, you have to go to developer options
#     and enable "Keep screen awake during charging" and charge your phone to make it stable.
#
# After registering devices, we can confirm it by querying rpc_tracker
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# mate10pro 2 2 0
# rk3399 2 2 0
# rpi3b 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate the measurement in tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations. Here I use an RK3399 board
# as an example. In your setting, you should modify the target and device_key accordingly.
# Set :code:`use_android` to True if you use an Android phone.
#### DEVICE CONFIG ####
# Replace "aarch64-linux-gnu" with the correct target of your board.
# This target host is used for cross compilation. You can query it by :code:`gcc -v` on your device.
target = tvm.target.Target("opencl -device=mali", host="llvm -mtriple=aarch64-linux-gnu")
# Also replace this with the device key in your tracker
device_key = "rk3399"
# Set this to True if you use android phone
use_android = False
#### TUNING OPTION ####
network = "resnet-18"
log_file = "%s.%s.log" % (device_key, network)
dtype = "float32"
tuning_option = {
"log_filename": log_file,
"tuner": "xgb",
"n_trial": 1000,
"early_stopping": 450,
"measure_option": autotvm.measure_option(
builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"),
runner=autotvm.RPCRunner(
device_key,
host="127.0.0.1",
port=9190,
number=10,
timeout=5,
),
),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default values provided here work well.
# If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger,
# which makes the tuning run longer.
#   If your device runs very slowly or your conv2d operators have many GFLOPs, consider
#   setting the timeout larger.
#
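####################################################################
#
# For instance, a longer tuning budget could be expressed as follows
# (the values are illustrative, not recommendations):
#
# .. code-block:: python
#
#   tuning_option["n_trial"] = 2000
#   tuning_option["early_stopping"] = 800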
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(
tasks,
measure_option,
tuner="xgb",
n_trial=1000,
early_stopping=None,
log_filename="tuning.log",
use_transfer_learning=True,
):
# create tmp log file
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
# create tuner
if tuner == "xgb" or tuner == "xgb-rank":
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
# do tuning
tsk_trial = min(n_trial, len(tsk.config_space))
tuner_obj.tune(
n_trial=tsk_trial,
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
# pick best records to a cache file
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
# extract workloads from relay program
print("Extract tasks...")
mod, params, input_shape, _ = get_network(network, batch_size=1)
tasks = autotvm.task.extract_from_program(
mod["main"],
target=target,
params=params,
ops=(relay.op.get("nn.conv2d"),),
)
# run tuning tasks
print("Tuning...")
tune_tasks(tasks, **tuning_opt)
# compile kernels with history best records
with autotvm.apply_history_best(log_file):
print("Compile...")
with tvm.transform.PassContext(opt_level=3):
lib = relay.build_module.build(mod, target=target, params=params)
# export library
tmp = tempdir()
if use_android:
from tvm.contrib import ndk
filename = "net.so"
lib.export_library(tmp.relpath(filename), ndk.create_shared)
else:
filename = "net.tar"
lib.export_library(tmp.relpath(filename))
# upload module to device
print("Upload...")
remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000)
remote.upload(tmp.relpath(filename))
rlib = remote.load_module(filename)
# upload parameters to device
dev = remote.device(str(target), 0)
module = runtime.GraphModule(rlib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
# evaluate
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run", dev, number=1, repeat=30)
prof_res = np.array(ftimer().results) * 1000 # convert to millisecond
print(
"Mean inference time (std dev): %.2f ms (%.2f ms)"
% (np.mean(prof_res), np.std(prof_res))
)
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
# tune_and_evaluate(tuning_option)
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract feature from them.
# So a high performance CPU is recommended.
# One sample output is listed below. It takes about 3 hours on a 32T AMD Ryzen Threadripper.
#
# .. code-block:: bash
#
# Extract tasks...
# Tuning...
# [Task 1/17] Current/Best: 25.30/ 39.12 GFLOPS | Progress: (992/1000) | 751.22 s Done.
# [Task 2/17] Current/Best: 40.70/ 45.50 GFLOPS | Progress: (736/1000) | 545.46 s Done.
# [Task 3/17] Current/Best: 38.83/ 42.35 GFLOPS | Progress: (992/1000) | 1549.85 s Done.
# [Task 4/17] Current/Best: 23.31/ 31.02 GFLOPS | Progress: (640/1000) | 1059.31 s Done.
# [Task 5/17] Current/Best: 0.06/ 2.34 GFLOPS | Progress: (544/1000) | 305.45 s Done.
# [Task 6/17] Current/Best: 10.97/ 17.20 GFLOPS | Progress: (992/1000) | 1050.00 s Done.
# [Task 7/17] Current/Best: 8.98/ 10.94 GFLOPS | Progress: (928/1000) | 421.36 s Done.
# [Task 8/17] Current/Best: 4.48/ 14.86 GFLOPS | Progress: (704/1000) | 582.60 s Done.
# [Task 9/17] Current/Best: 10.30/ 25.99 GFLOPS | Progress: (864/1000) | 899.85 s Done.
# [Task 10/17] Current/Best: 11.73/ 12.52 GFLOPS | Progress: (608/1000) | 304.85 s Done.
# [Task 11/17] Current/Best: 15.26/ 18.68 GFLOPS | Progress: (800/1000) | 747.52 s Done.
# [Task 12/17] Current/Best: 17.48/ 26.71 GFLOPS | Progress: (1000/1000) | 1166.40 s Done.
# [Task 13/17] Current/Best: 0.96/ 11.43 GFLOPS | Progress: (960/1000) | 611.65 s Done.
# [Task 14/17] Current/Best: 17.88/ 20.22 GFLOPS | Progress: (672/1000) | 670.29 s Done.
# [Task 15/17] Current/Best: 11.62/ 13.98 GFLOPS | Progress: (736/1000) | 449.25 s Done.
# [Task 16/17] Current/Best: 19.90/ 23.83 GFLOPS | Progress: (608/1000) | 708.64 s Done.
# [Task 17/17] Current/Best: 17.98/ 22.75 GFLOPS | Progress: (736/1000) | 1122.60 s Done.
# Compile...
# Upload...
# Evaluate inference time cost...
# Mean inference time (std dev): 128.05 ms (7.74 ms)
#
######################################################################
#
# .. note:: **Experiencing Difficulties?**
#
# The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then there must be something wrong.
#
# First, make sure you set the correct configuration of your device.
# Then, you can print debug information by adding these lines in the beginning
# of the script. It will print every measurement result, where you can find useful
# error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
# Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
| 38.790476
| 100
| 0.631169
| true
| true
|
|
790c306b71032e576a44a0537655d890cf85245c
| 397
|
py
|
Python
|
terrascript/data/mrcrilly/awx.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/data/mrcrilly/awx.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/data/mrcrilly/awx.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/data/mrcrilly/awx.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:12:44 UTC)
import terrascript
class awx_credential(terrascript.Data):
pass
class awx_credential_azure_key_vault(terrascript.Data):
pass
class awx_credentials(terrascript.Data):
pass
__all__ = [
"awx_credential",
"awx_credential_azure_key_vault",
"awx_credentials",
]
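# A minimal usage sketch (hypothetical label and workflow; the usual
# python-terrascript pattern of adding blocks to a Terrascript config is
# assumed here, not taken from this generated module):
#
#   import terrascript
#   from terrascript.data.mrcrilly.awx import awx_credentials
#
#   config = terrascript.Terrascript()
#   config += awx_credentials("all")  # emits a `data "awx_credentials" "all"` block
#   print(str(config))                # renders the Terraform JSON document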
| 17.26087
| 73
| 0.753149
|
import terrascript
class awx_credential(terrascript.Data):
pass
class awx_credential_azure_key_vault(terrascript.Data):
pass
class awx_credentials(terrascript.Data):
pass
__all__ = [
"awx_credential",
"awx_credential_azure_key_vault",
"awx_credentials",
]
| true
| true
|
790c31e8227a70c49df392a93c36d1ee7b23e14c
| 2,632
|
py
|
Python
|
spimdisasm/common/FileSectionType.py
|
Decompollaborate/py-mips-disasm
|
1301340130f8cca539d12c486deff3a1037a8787
|
[
"MIT"
] | 6
|
2021-11-17T21:21:25.000Z
|
2022-01-28T13:37:44.000Z
|
spimdisasm/common/FileSectionType.py
|
Decompollaborate/py-mips-disasm
|
1301340130f8cca539d12c486deff3a1037a8787
|
[
"MIT"
] | null | null | null |
spimdisasm/common/FileSectionType.py
|
Decompollaborate/py-mips-disasm
|
1301340130f8cca539d12c486deff3a1037a8787
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: © 2022 Decompollaborate
# SPDX-License-Identifier: MIT
from __future__ import annotations
import enum
@enum.unique
class FileSectionType(enum.Enum):
Unknown = -2
Invalid = -1
Text = 1
Data = 2
Rodata = 3
Bss = 4
Reloc = 5
@staticmethod
def fromId(sectionId: int) -> FileSectionType:
if sectionId == 1:
return FileSectionType.Text
if sectionId == 2:
return FileSectionType.Data
if sectionId == 3:
return FileSectionType.Rodata
if sectionId == 4:
return FileSectionType.Bss
if sectionId == 5:
return FileSectionType.Reloc
return FileSectionType.Invalid
@staticmethod
def fromStr(x: str) -> FileSectionType:
if x == ".text":
return FileSectionType.Text
if x == ".data":
return FileSectionType.Data
if x == ".rodata":
return FileSectionType.Rodata
if x == ".bss":
return FileSectionType.Bss
if x == ".reloc":
return FileSectionType.Reloc
return FileSectionType.Invalid
def toStr(self) -> str:
if self == FileSectionType.Text:
return ".text"
if self == FileSectionType.Data:
return ".data"
if self == FileSectionType.Rodata:
return ".rodata"
if self == FileSectionType.Bss:
return ".bss"
if self == FileSectionType.Reloc:
return ".reloc"
return ""
def toCapitalizedStr(self) -> str:
if self == FileSectionType.Text:
return "Text"
if self == FileSectionType.Data:
return "Data"
if self == FileSectionType.Rodata:
return "RoData"
if self == FileSectionType.Bss:
return "Bss"
if self == FileSectionType.Reloc:
return "Reloc"
return ""
def toSectionName(self) -> str:
if self == FileSectionType.Text:
return ".text"
if self == FileSectionType.Data:
return ".data"
if self == FileSectionType.Rodata:
return ".rodata"
if self == FileSectionType.Bss:
return ".bss"
if self == FileSectionType.Reloc:
return ".ovl"
return ""
FileSections_ListBasic = [FileSectionType.Text, FileSectionType.Data, FileSectionType.Rodata, FileSectionType.Bss]
FileSections_ListAll = [FileSectionType.Text, FileSectionType.Data, FileSectionType.Rodata, FileSectionType.Bss, FileSectionType.Reloc]
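# A small round-trip sketch (all values taken from the mappings above):
#
#   >>> FileSectionType.fromStr(".rodata") is FileSectionType.Rodata
#   True
#   >>> FileSectionType.Reloc.toSectionName()
#   '.ovl'
#   >>> FileSectionType.fromId(99)
#   <FileSectionType.Invalid: -1>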
| 28.923077
| 135
| 0.582067
|
from __future__ import annotations
import enum
@enum.unique
class FileSectionType(enum.Enum):
Unknown = -2
Invalid = -1
Text = 1
Data = 2
Rodata = 3
Bss = 4
Reloc = 5
@staticmethod
def fromId(sectionId: int) -> FileSectionType:
if sectionId == 1:
return FileSectionType.Text
if sectionId == 2:
return FileSectionType.Data
if sectionId == 3:
return FileSectionType.Rodata
if sectionId == 4:
return FileSectionType.Bss
if sectionId == 5:
return FileSectionType.Reloc
return FileSectionType.Invalid
@staticmethod
def fromStr(x: str) -> FileSectionType:
if x == ".text":
return FileSectionType.Text
if x == ".data":
return FileSectionType.Data
if x == ".rodata":
return FileSectionType.Rodata
if x == ".bss":
return FileSectionType.Bss
if x == ".reloc":
return FileSectionType.Reloc
return FileSectionType.Invalid
def toStr(self) -> str:
if self == FileSectionType.Text:
return ".text"
if self == FileSectionType.Data:
return ".data"
if self == FileSectionType.Rodata:
return ".rodata"
if self == FileSectionType.Bss:
return ".bss"
if self == FileSectionType.Reloc:
return ".reloc"
return ""
def toCapitalizedStr(self) -> str:
if self == FileSectionType.Text:
return "Text"
if self == FileSectionType.Data:
return "Data"
if self == FileSectionType.Rodata:
return "RoData"
if self == FileSectionType.Bss:
return "Bss"
if self == FileSectionType.Reloc:
return "Reloc"
return ""
def toSectionName(self) -> str:
if self == FileSectionType.Text:
return ".text"
if self == FileSectionType.Data:
return ".data"
if self == FileSectionType.Rodata:
return ".rodata"
if self == FileSectionType.Bss:
return ".bss"
if self == FileSectionType.Reloc:
return ".ovl"
return ""
FileSections_ListBasic = [FileSectionType.Text, FileSectionType.Data, FileSectionType.Rodata, FileSectionType.Bss]
FileSections_ListAll = [FileSectionType.Text, FileSectionType.Data, FileSectionType.Rodata, FileSectionType.Bss, FileSectionType.Reloc]
| true
| true
|
790c32855a158da0112d5869bb7c58352e860b86
| 2,141
|
py
|
Python
|
examples/lift-sub.py
|
LaPetiteSouris/python-liftbridge
|
e97f795ae1be8ea871ac032d61464fb91a45973e
|
[
"Apache-2.0"
] | 11
|
2019-08-20T22:46:26.000Z
|
2021-05-25T18:41:10.000Z
|
examples/lift-sub.py
|
LaPetiteSouris/python-liftbridge
|
e97f795ae1be8ea871ac032d61464fb91a45973e
|
[
"Apache-2.0"
] | 6
|
2019-09-07T08:53:33.000Z
|
2020-07-05T12:27:36.000Z
|
examples/lift-sub.py
|
LaPetiteSouris/python-liftbridge
|
e97f795ae1be8ea871ac032d61464fb91a45973e
|
[
"Apache-2.0"
] | 4
|
2019-09-01T02:18:31.000Z
|
2020-07-03T17:05:46.000Z
|
import argparse
import logging
from datetime import datetime
from python_liftbridge import ErrNoSuchStream
from python_liftbridge import ErrStreamExists
from python_liftbridge import Lift
from python_liftbridge import Stream
def parse_arguments():
'''Argument parsing for the script'''
parser = argparse.ArgumentParser(
description='Liftbridge sub script.',
)
parser.add_argument(
'subject',
metavar='subject',
)
parser.add_argument(
'stream',
metavar='stream',
)
parser.add_argument(
'-s',
'--server',
metavar='s',
nargs='?',
default='127.0.0.1:9292',
help='(default: %(default)s)',
)
parser.add_argument(
'-t',
'--timestamp',
action='store_true',
help='Display timestamps',
)
parser.add_argument(
'-c',
'--create',
action='store_true',
help="Creates the stream in case it doesn't exist",
)
parser.add_argument(
'-d',
'--debug',
action='store_true',
help='Shows debug logs',
)
return parser.parse_args()
def main():
args = parse_arguments()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
client = Lift(ip_address=args.server)
count = 0
if args.create:
try:
client.create_stream(Stream(args.subject, args.stream))
except ErrStreamExists:
pass
try:
for message in client.subscribe(
Stream(
args.subject,
args.stream,
).start_at_earliest_received(),
):
print("{} [#{}] Received on [{} - {}]: '{}'".format(
datetime.fromtimestamp(
int(message.timestamp) /
1000000000,
), count, args.subject, args.stream, message.value.decode('utf-8'),
))
count = count + 1
except ErrNoSuchStream:
print("The stream {} doesn't exist. With -c or --create it's creation can be forced."
.format(args.stream))
main()
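# Example invocation (flags as defined in parse_arguments above; subject and
# stream names are placeholders):
#
#   python lift-sub.py my-subject my-stream -s 127.0.0.1:9292 --create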
| 23.788889
| 93
| 0.554414
|
import argparse
import logging
from datetime import datetime
from python_liftbridge import ErrNoSuchStream
from python_liftbridge import ErrStreamExists
from python_liftbridge import Lift
from python_liftbridge import Stream
def parse_arguments():
parser = argparse.ArgumentParser(
description='Liftbridge sub script.',
)
parser.add_argument(
'subject',
metavar='subject',
)
parser.add_argument(
'stream',
metavar='stream',
)
parser.add_argument(
'-s',
'--server',
metavar='s',
nargs='?',
default='127.0.0.1:9292',
help='(default: %(default)s)',
)
parser.add_argument(
'-t',
'--timestamp',
action='store_true',
help='Display timestamps',
)
parser.add_argument(
'-c',
'--create',
action='store_true',
help="Creates the stream in case it doesn't exist",
)
parser.add_argument(
'-d',
'--debug',
action='store_true',
help='Shows debug logs',
)
return parser.parse_args()
def main():
args = parse_arguments()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
client = Lift(ip_address=args.server)
count = 0
if args.create:
try:
client.create_stream(Stream(args.subject, args.stream))
except ErrStreamExists:
pass
try:
for message in client.subscribe(
Stream(
args.subject,
args.stream,
).start_at_earliest_received(),
):
print("{} [#{}] Received on [{} - {}]: '{}'".format(
datetime.fromtimestamp(
int(message.timestamp) /
1000000000,
), count, args.subject, args.stream, message.value.decode('utf-8'),
))
count = count + 1
except ErrNoSuchStream:
print("The stream {} doesn't exist. With -c or --create it's creation can be forced."
.format(args.stream))
main()
| true
| true
|
790c3393108cd9898b7e0ad17e69308a6cc855fd
| 1,108
|
py
|
Python
|
builder/builder.py
|
bdastur/builder
|
e05c013d01c4e82340879289940b3029fc6de266
|
[
"Apache-2.0"
] | null | null | null |
builder/builder.py
|
bdastur/builder
|
e05c013d01c4e82340879289940b3029fc6de266
|
[
"Apache-2.0"
] | null | null | null |
builder/builder.py
|
bdastur/builder
|
e05c013d01c4e82340879289940b3029fc6de266
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
import builderutils.parser as parser
import builderutils.renderer as renderer
import builderutils.dom as dom
@click.group()
def cli():
pass
@click.command()
@click.option("--configfile", type=click.Path(), help="Builder config", required=True)
def create(configfile):
print("create command!")
parserObj = parser.ConfigParser(configfile)
print("Parser Obj: ", parserObj)
domObj = dom.DomManager(parserObj)
domObj.buildDomTree()
dom.DomManager.parseDomTree(dom.SAMPLE_DOM)
# parserObj = parser.BuilderParser(configfile)
# renderObj = renderer.Renderer()
# renderObj.build_staging_environment(parserObj.parsedData)
# userConfig = parserObj.parsedData["user_config"]
# htmlTemplate = parserObj.parsedData["html_template"]
# flaskTemplate = parserObj.parsedData["flask_template"]
# renderObj.build_html_documents(htmlTemplate, userConfig)
# renderObj.build_flask_app(flaskTemplate, userConfig)
def main():
cli.add_command(create)
cli()
if __name__ == "__main__":
main()
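# Example invocation (the config file name is a placeholder; its format is
# defined by builderutils.parser.ConfigParser and not shown here):
#
#   python builder.py create --configfile builder.yaml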
| 25.181818
| 86
| 0.723827
|
import click
import builderutils.parser as parser
import builderutils.renderer as renderer
import builderutils.dom as dom
@click.group()
def cli():
pass
@click.command()
@click.option("--configfile", type=click.Path(), help="Builder config", required=True)
def create(configfile):
print("create command!")
parserObj = parser.ConfigParser(configfile)
print("Parser Obj: ", parserObj)
domObj = dom.DomManager(parserObj)
domObj.buildDomTree()
dom.DomManager.parseDomTree(dom.SAMPLE_DOM)
def main():
cli.add_command(create)
cli()
if __name__ == "__main__":
main()
| true
| true
|
790c34315189b20923ce14ee5580100c099a4091
| 9,984
|
py
|
Python
|
sdk/python/pulumi_okta/group_memberships.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2019-10-29T21:59:22.000Z
|
2021-11-08T12:00:24.000Z
|
sdk/python/pulumi_okta/group_memberships.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2020-01-06T10:28:09.000Z
|
2022-03-25T19:52:40.000Z
|
sdk/python/pulumi_okta/group_memberships.py
|
pulumi/pulumi-okta
|
83f7617a85b3d05213901773fa4e6a151ab6076b
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-09-11T16:31:04.000Z
|
2020-11-24T12:23:17.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['GroupMembershipsArgs', 'GroupMemberships']
@pulumi.input_type
class GroupMembershipsArgs:
def __init__(__self__, *,
group_id: pulumi.Input[str],
users: pulumi.Input[Sequence[pulumi.Input[str]]]):
"""
The set of arguments for constructing a GroupMemberships resource.
        :param pulumi.Input[str] group_id: ID of an Okta group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs whose membership in the group should be managed.
"""
pulumi.set(__self__, "group_id", group_id)
pulumi.set(__self__, "users", users)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Input[str]:
"""
        ID of an Okta group.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def users(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
        The list of Okta user IDs whose membership in the group should be managed.
"""
return pulumi.get(self, "users")
@users.setter
def users(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "users", value)
@pulumi.input_type
class _GroupMembershipsState:
def __init__(__self__, *,
group_id: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering GroupMemberships resources.
        :param pulumi.Input[str] group_id: ID of an Okta group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs whose membership in the group should be managed.
"""
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if users is not None:
pulumi.set(__self__, "users", users)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
        ID of an Okta group.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def users(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
        The list of Okta user IDs whose membership in the group should be managed.
"""
return pulumi.get(self, "users")
@users.setter
def users(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "users", value)
class GroupMemberships(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
group_id: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Resource to manage a set of memberships for a specific group.
This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource.
When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state.
## Example Usage
```python
import pulumi
import pulumi_okta as okta
test_group = okta.group.Group("testGroup", description="testing, testing")
test_group_memberships = okta.GroupMemberships("testGroupMemberships",
group_id=test_group.id,
users=[
okta_user["test1"]["id"],
okta_user["test2"]["id"],
])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] group_id: ID of an Okta group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs whose membership in the group should be managed.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GroupMembershipsArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource to manage a set of memberships for a specific group.
This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource.
When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state.
## Example Usage
```python
import pulumi
import pulumi_okta as okta
test_group = okta.group.Group("testGroup", description="testing, testing")
test_group_memberships = okta.GroupMemberships("testGroupMemberships",
group_id=test_group.id,
users=[
okta_user["test1"]["id"],
okta_user["test2"]["id"],
])
```
:param str resource_name: The name of the resource.
:param GroupMembershipsArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GroupMembershipsArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
group_id: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GroupMembershipsArgs.__new__(GroupMembershipsArgs)
if group_id is None and not opts.urn:
raise TypeError("Missing required property 'group_id'")
__props__.__dict__["group_id"] = group_id
if users is None and not opts.urn:
raise TypeError("Missing required property 'users'")
__props__.__dict__["users"] = users
super(GroupMemberships, __self__).__init__(
'okta:index/groupMemberships:GroupMemberships',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
group_id: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'GroupMemberships':
"""
Get an existing GroupMemberships resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] group_id: ID of an Okta group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs whose membership in the group should be managed.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _GroupMembershipsState.__new__(_GroupMembershipsState)
__props__.__dict__["group_id"] = group_id
__props__.__dict__["users"] = users
return GroupMemberships(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[str]:
"""
        ID of an Okta group.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def users(self) -> pulumi.Output[Sequence[str]]:
"""
        The list of Okta user IDs whose membership in the group should be managed.
"""
return pulumi.get(self, "users")
| 42.485106
| 441
| 0.644531
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['GroupMembershipsArgs', 'GroupMemberships']
@pulumi.input_type
class GroupMembershipsArgs:
def __init__(__self__, *,
group_id: pulumi.Input[str],
users: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(__self__, "group_id", group_id)
pulumi.set(__self__, "users", users)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def users(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "users")
@users.setter
def users(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "users", value)
@pulumi.input_type
class _GroupMembershipsState:
def __init__(__self__, *,
group_id: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if users is not None:
pulumi.set(__self__, "users", users)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def users(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "users")
@users.setter
def users(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "users", value)
class GroupMemberships(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
group_id: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: GroupMembershipsArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GroupMembershipsArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
group_id: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GroupMembershipsArgs.__new__(GroupMembershipsArgs)
if group_id is None and not opts.urn:
raise TypeError("Missing required property 'group_id'")
__props__.__dict__["group_id"] = group_id
if users is None and not opts.urn:
raise TypeError("Missing required property 'users'")
__props__.__dict__["users"] = users
super(GroupMemberships, __self__).__init__(
'okta:index/groupMemberships:GroupMemberships',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
group_id: Optional[pulumi.Input[str]] = None,
users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'GroupMemberships':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _GroupMembershipsState.__new__(_GroupMembershipsState)
__props__.__dict__["group_id"] = group_id
__props__.__dict__["users"] = users
return GroupMemberships(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def users(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "users")
| true
| true
|
790c34351d8a5722887cf5d4aa217305f90b0303
| 2,915
|
py
|
Python
|
scripts/channel_map.py
|
saucecontrol/performance
|
07a9972ebb445b66e8524078502ac305c577891a
|
[
"MIT"
] | 1
|
2020-04-15T01:29:12.000Z
|
2020-04-15T01:29:12.000Z
|
scripts/channel_map.py
|
saucecontrol/performance
|
07a9972ebb445b66e8524078502ac305c577891a
|
[
"MIT"
] | null | null | null |
scripts/channel_map.py
|
saucecontrol/performance
|
07a9972ebb445b66e8524078502ac305c577891a
|
[
"MIT"
] | null | null | null |
from argparse import ArgumentParser
class ChannelMap():
channel_map = {
'master': {
'tfm': 'netcoreapp5.0',
'branch': 'master'
},
'release/3.1.2xx': {
'tfm': 'netcoreapp3.1',
'branch': 'release/3.1.2xx'
},
'release/3.1.1xx': {
'tfm': 'netcoreapp3.1',
'branch': 'release/3.1.1xx'
},
'3.1': {
'tfm': 'netcoreapp3.1',
'branch': 'release/3.1'
},
'3.0': {
'tfm': 'netcoreapp3.0',
'branch': 'release/3.0'
},
'release/2.1.6xx': {
'tfm': 'netcoreapp2.1',
'branch': 'release/2.1.6xx'
},
'2.1': {
'tfm': 'netcoreapp2.1',
'branch': 'release/2.1'
},
'LTS': {
            'tfm': 'net461',  # For Full Framework, download the LTS dotnet cli.
'branch': 'LTS'
}
}
@staticmethod
def get_supported_channels() -> list:
'''List of supported channels.'''
return list(ChannelMap.channel_map.keys())
@staticmethod
def get_supported_frameworks() -> list:
'''List of supported frameworks'''
frameworks = [ChannelMap.channel_map[channel]['tfm'] for channel in ChannelMap.channel_map]
        return list(set(frameworks))
@staticmethod
def get_branch(channel: str) -> str:
if channel in ChannelMap.channel_map:
return ChannelMap.channel_map[channel]['branch']
else:
raise Exception('Channel %s is not supported. Supported channels %s' % (channel, ChannelMap.get_supported_channels()))
@staticmethod
def get_target_framework_monikers(channels: list) -> list:
'''
Translates channel names to Target Framework Monikers (TFMs).
'''
monikers = [
ChannelMap.get_target_framework_moniker(channel)
for channel in channels
]
return list(set(monikers))
@staticmethod
def get_target_framework_moniker(channel: str) -> str:
'''
Translate channel name to Target Framework Moniker (TFM)
'''
if channel in ChannelMap.channel_map:
return ChannelMap.channel_map[channel]['tfm']
else:
raise Exception('Channel %s is not supported. Supported channels %s' % (channel, ChannelMap.get_supported_channels()))
@staticmethod
def get_channel_from_target_framework_moniker(target_framework_moniker: str) -> str:
'''Translate Target Framework Moniker (TFM) to channel name'''
for channel in ChannelMap.channel_map:
if ChannelMap.channel_map[channel]['tfm'] == target_framework_moniker:
return channel
raise Exception('Framework %s is not supported. Supported frameworks: %s' % (target_framework_moniker, ChannelMap.get_supported_frameworks()))
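# A quick usage sketch (all values taken from channel_map above):
#
#   >>> ChannelMap.get_branch('3.1')
#   'release/3.1'
#   >>> ChannelMap.get_target_framework_moniker('master')
#   'netcoreapp5.0'
#   >>> ChannelMap.get_channel_from_target_framework_moniker('netcoreapp3.0')
#   '3.0'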
| 34.702381
| 150
| 0.575643
|
from argparse import ArgumentParser
class ChannelMap():
channel_map = {
'master': {
'tfm': 'netcoreapp5.0',
'branch': 'master'
},
'release/3.1.2xx': {
'tfm': 'netcoreapp3.1',
'branch': 'release/3.1.2xx'
},
'release/3.1.1xx': {
'tfm': 'netcoreapp3.1',
'branch': 'release/3.1.1xx'
},
'3.1': {
'tfm': 'netcoreapp3.1',
'branch': 'release/3.1'
},
'3.0': {
'tfm': 'netcoreapp3.0',
'branch': 'release/3.0'
},
'release/2.1.6xx': {
'tfm': 'netcoreapp2.1',
'branch': 'release/2.1.6xx'
},
'2.1': {
'tfm': 'netcoreapp2.1',
'branch': 'release/2.1'
},
'LTS': {
'tfm': 'net461',
'branch': 'LTS'
}
}
@staticmethod
def get_supported_channels() -> list:
return list(ChannelMap.channel_map.keys())
@staticmethod
def get_supported_frameworks() -> list:
frameworks = [ChannelMap.channel_map[channel]['tfm'] for channel in ChannelMap.channel_map]
        return list(set(frameworks))
@staticmethod
def get_branch(channel: str) -> str:
if channel in ChannelMap.channel_map:
return ChannelMap.channel_map[channel]['branch']
else:
raise Exception('Channel %s is not supported. Supported channels %s' % (channel, ChannelMap.get_supported_channels()))
@staticmethod
def get_target_framework_monikers(channels: list) -> list:
monikers = [
ChannelMap.get_target_framework_moniker(channel)
for channel in channels
]
return list(set(monikers))
@staticmethod
def get_target_framework_moniker(channel: str) -> str:
if channel in ChannelMap.channel_map:
return ChannelMap.channel_map[channel]['tfm']
else:
raise Exception('Channel %s is not supported. Supported channels %s' % (channel, ChannelMap.get_supported_channels()))
@staticmethod
def get_channel_from_target_framework_moniker(target_framework_moniker: str) -> str:
for channel in ChannelMap.channel_map:
if ChannelMap.channel_map[channel]['tfm'] == target_framework_moniker:
return channel
raise Exception('Framework %s is not supported. Supported frameworks: %s' % (target_framework_moniker, ChannelMap.get_supported_frameworks()))
| true
| true
|
790c351e85dfe8de80f45ed9b33eee6eb2111f60
| 213
|
py
|
Python
|
setup.py
|
foundling/filekit
|
211ae8e99efe35a5a984c78349f85cfa7f42506a
|
[
"MIT"
] | null | null | null |
setup.py
|
foundling/filekit
|
211ae8e99efe35a5a984c78349f85cfa7f42506a
|
[
"MIT"
] | null | null | null |
setup.py
|
foundling/filekit
|
211ae8e99efe35a5a984c78349f85cfa7f42506a
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='filekit',
version='0.1',
packages=['filekit'],
license='MIT',
long_description=open('README.md').read(),
    long_description_content_type='text/markdown'
)
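# Hypothetical packaging step (standard setuptools workflow assumed):
#
#   python setup.py sdist   # writes dist/filekit-0.1.tar.gz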
| 19.363636
| 46
| 0.661972
|
from setuptools import setup
setup(
name='filekit',
version='0.1',
packages=['filekit'],
license='MIT',
long_description=open('README.md').read(),
    long_description_content_type='text/markdown'
)
| true
| true
|
790c38aa18526ff3a77826fd4a9ebe3cd5a1334c
| 1,179
|
py
|
Python
|
data_driven_acquisition/migrations/0005_auto_20191029_1531.py
|
adam-grandt-tts/data-driven-acquisition
|
2f970a2815f90f591203c02c9099642e4cbd24d8
|
[
"CC0-1.0"
] | 1
|
2020-02-14T17:36:27.000Z
|
2020-02-14T17:36:27.000Z
|
data_driven_acquisition/migrations/0005_auto_20191029_1531.py
|
adam-grandt-tts/data-driven-acquisition
|
2f970a2815f90f591203c02c9099642e4cbd24d8
|
[
"CC0-1.0"
] | 20
|
2020-01-21T15:04:16.000Z
|
2021-08-05T16:18:06.000Z
|
data_driven_acquisition/migrations/0005_auto_20191029_1531.py
|
adam-grandt-tts/data-driven-acquisition
|
2f970a2815f90f591203c02c9099642e4cbd24d8
|
[
"CC0-1.0"
] | 2
|
2020-05-10T18:29:54.000Z
|
2021-03-15T18:12:07.000Z
|
# Generated by Django 2.2.6 on 2019-10-29 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_driven_acquisition', '0004_acl'),
]
operations = [
migrations.AlterModelOptions(
name='file',
options={'get_latest_by': 'created_at', 'permissions': (('can_edit_content', 'Can edit file content'),)},
),
migrations.AlterModelOptions(
name='folder',
options={'get_latest_by': 'created_at', 'permissions': (('can_set_properties', 'Can set properties on folder'), ('can_propagate_properties', 'Can propagate properties to children.'), ('can_edit_child_content', 'Can edit content of children.'))},
),
migrations.AlterModelOptions(
name='packagetemplate',
options={'get_latest_by': 'created_at', 'permissions': (('can_deploy', 'Can deploy from template'),)},
),
migrations.AddField(
model_name='packagetemplate',
name='title',
field=models.CharField(default='lorem ipsum', max_length=256),
preserve_default=False,
),
]
| 36.84375
| 257
| 0.616624
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_driven_acquisition', '0004_acl'),
]
operations = [
migrations.AlterModelOptions(
name='file',
options={'get_latest_by': 'created_at', 'permissions': (('can_edit_content', 'Can edit file content'),)},
),
migrations.AlterModelOptions(
name='folder',
options={'get_latest_by': 'created_at', 'permissions': (('can_set_properties', 'Can set properties on folder'), ('can_propagate_properties', 'Can propagate properties to children.'), ('can_edit_child_content', 'Can edit content of children.'))},
),
migrations.AlterModelOptions(
name='packagetemplate',
options={'get_latest_by': 'created_at', 'permissions': (('can_deploy', 'Can deploy from template'),)},
),
migrations.AddField(
model_name='packagetemplate',
name='title',
field=models.CharField(default='lorem ipsum', max_length=256),
preserve_default=False,
),
]
| true
| true
|
790c38ab19cd66c0bd88351a146c434418246fb5
| 570
|
py
|
Python
|
model/group.py
|
Docent321/python_traning
|
eb4b9f6ba9821f6e32dce954c83754d40b8f66c5
|
[
"Apache-2.0"
] | null | null | null |
model/group.py
|
Docent321/python_traning
|
eb4b9f6ba9821f6e32dce954c83754d40b8f66c5
|
[
"Apache-2.0"
] | null | null | null |
model/group.py
|
Docent321/python_traning
|
eb4b9f6ba9821f6e32dce954c83754d40b8f66c5
|
[
"Apache-2.0"
] | null | null | null |
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return"%s:%s:%s:%s" % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
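# id_or_max lets groups that lack a database id sort after persisted ones,
# e.g. (hypothetical list of Group instances):
#
#   groups.sort(key=Group.id_or_max)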
| 23.75
| 103
| 0.582456
|
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return"%s:%s:%s:%s" % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| true
| true
|
790c38c261dbd3597bbec5dac7ffcb4c4d124539
| 2,419
|
py
|
Python
|
examples/python/helloworld/greeter_client.py
|
DMCDavi/grpc-stateful-less
|
d46da0352db3b83287b09d94b354b3a80571371b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/python/helloworld/greeter_client.py
|
DMCDavi/grpc-stateful-less
|
d46da0352db3b83287b09d94b354b3a80571371b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/python/helloworld/greeter_client.py
|
DMCDavi/grpc-stateful-less
|
d46da0352db3b83287b09d94b354b3a80571371b
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
from __future__ import print_function
import logging
from urllib import response
from vinte_um import Jogador, VinteUm
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
import time
import redis
def createLoginForm(stub):
username = input("Digite seu login: ")
password = input("Digite sua senha: ")
_redis = redis.Redis(
host= 'localhost',
port= '6379',
password = 'davi')
_redis.set('username', username)
value = _redis.get('username')
print("variavel do redis:", value)
return stub.Login(helloworld_pb2.LoginRequest(username=username, password=password))
def runTurn(stub, auth_token):
extraCard = input("Deseja cavar mais uma carta? S/N: ")
return stub.TurnAction(helloworld_pb2.TurnRequest(auth_token=auth_token, dig = extraCard))
def run():
# NOTE(gRPC Python Team): .close() is possible on a channel and should be
# used in circumstances in which the with statement does not fit the needs
# of the code.
with grpc.insecure_channel('0.0.0.0:50051') as channel:
stub = helloworld_pb2_grpc.GreeterStub(channel)
login = createLoginForm(stub)
print("Suas cartas são: ", login.message)
while True:
turnResponse = runTurn(stub, login.auth_token)
print("Suas cartas são: ", turnResponse.cards)
if turnResponse.message:
print(turnResponse.message)
if turnResponse.playing == "False":
break
winner = stub.VerifyTurn(helloworld_pb2.VerifyTurnRequest(auth_token=login.auth_token))
print(winner.message)
if __name__ == '__main__':
logging.basicConfig()
run()
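# Note: this client assumes a matching helloworld gRPC server listening on
# 0.0.0.0:50051 and a local redis instance with password 'davi', as
# configured above.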
| 35.057971
| 98
| 0.678379
|
from __future__ import print_function
import logging
from urllib import response
from vinte_um import Jogador, VinteUm
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
import time
import redis
def createLoginForm(stub):
username = input("Digite seu login: ")
password = input("Digite sua senha: ")
_redis = redis.Redis(
host= 'localhost',
port= '6379',
password = 'davi')
_redis.set('username', username)
value = _redis.get('username')
print("variavel do redis:", value)
return stub.Login(helloworld_pb2.LoginRequest(username=username, password=password))
def runTurn(stub, auth_token):
extraCard = input("Deseja cavar mais uma carta? S/N: ")
return stub.TurnAction(helloworld_pb2.TurnRequest(auth_token=auth_token, dig = extraCard))
def run():
with grpc.insecure_channel('0.0.0.0:50051') as channel:
stub = helloworld_pb2_grpc.GreeterStub(channel)
login = createLoginForm(stub)
print("Suas cartas são: ", login.message)
while True:
turnResponse = runTurn(stub, login.auth_token)
print("Suas cartas são: ", turnResponse.cards)
if turnResponse.message:
print(turnResponse.message)
if turnResponse.playing == "False":
break
winner = stub.VerifyTurn(helloworld_pb2.VerifyTurnRequest(auth_token=login.auth_token))
print(winner.message)
if __name__ == '__main__':
logging.basicConfig()
run()
| true
| true
|
790c391f9976088578bd7d0802480606dc8141ea
| 14,998
|
py
|
Python
|
lib/third_party/dns/rdata.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/third_party/dns/rdata.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
lib/third_party/dns/rdata.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 1
|
2020-07-24T18:47:35.000Z
|
2020-07-24T18:47:35.000Z
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunksize: At most this many octets will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunksize: int"""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
"""Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
"""
if isinstance(qstring, text_type):
qstring = qstring.encode()
if not isinstance(qstring, bytearray):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + chr(c)
elif c >= 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text
def _truncate_bitmap(what):
"""Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
"""
for i in xrange(len(what) - 1, -1, -1):
if what[i] != 0:
return what[0: i + 1]
return what[0:1]
class Rdata(object):
"""Base class for all DNS rdata types.
"""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
"""
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
"""DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
"""
return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
"""
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
"""Convert an rdata to text format.
@rtype: string
"""
raise NotImplementedError
def to_wire(self, file, compress=None, origin=None):
"""Convert an rdata to wire format.
@rtype: string
"""
raise NotImplementedError
def to_digestable(self, origin=None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form."""
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
"""Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
"""
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
"""
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
if our > their:
return 1
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
def choose_relativity(self, origin=None, relativize=True):
"""Convert any domain names in the rdata to the specified
relativization.
"""
pass
class GenericRdata(Rdata):
"""Generate Rdata Class
This class is used for rdata types for which we have no better
implementation. It implements the DNS "unknown RRs" scheme.
"""
__slots__ = ['data']
def __init__(self, rdclass, rdtype, data):
super(GenericRdata, self).__init__(rdclass, rdtype)
self.data = data
def to_text(self, origin=None, relativize=True, **kw):
return r'\# %d ' % len(self.data) + _hexify(self.data)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
token = tok.get()
if not token.is_identifier() or token.value != r'\#':
raise dns.exception.SyntaxError(
r'generic rdata does not start with \#')
length = tok.get_int()
chunks = []
        while True:
token = tok.get()
if token.is_eol_or_eof():
break
chunks.append(token.value.encode())
hex = b''.join(chunks)
data = binascii.unhexlify(hex)
if len(data) != length:
raise dns.exception.SyntaxError(
'generic rdata hex data has wrong length')
return cls(rdclass, rdtype, data)
def to_wire(self, file, compress=None, origin=None):
file.write(self.data)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
return cls(rdclass, rdtype, wire[current: current + rdlen])
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
mod = _rdata_modules.get((rdclass, rdtype))
rdclass_text = dns.rdataclass.to_text(rdclass)
rdtype_text = dns.rdatatype.to_text(rdtype)
rdtype_text = rdtype_text.replace('-', '_')
if not mod:
        mod = _rdata_modules.get((dns.rdataclass.ANY, rdtype))
if not mod:
try:
mod = import_module('.'.join([_module_prefix,
rdclass_text, rdtype_text]))
_rdata_modules[(rdclass, rdtype)] = mod
except ImportError:
try:
mod = import_module('.'.join([_module_prefix,
'ANY', rdtype_text]))
_rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
except ImportError:
mod = None
if mod:
cls = getattr(mod, rdtype_text)
else:
cls = GenericRdata
return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
If I{tok} is a string, then a tokenizer is created and the string
is used as its input.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer or input text
@type tok: dns.tokenizer.Tokenizer or string
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: Should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance"""
if isinstance(tok, string_types):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if cls != GenericRdata:
# peek at first token
token = tok.get()
tok.unget(token)
if token.is_identifier() and \
token.value == r'\#':
#
# Known type using the generic syntax. Extract the
# wire form from the generic syntax, and then run
# from_wire on it.
#
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance"""
wire = dns.wiredata.maybe_wrap(wire)
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
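# A short usage sketch (text-form round trip, using only the API defined above;
# from_text accepts a plain string and builds a tokenizer from it):
#
#   import dns.rdata, dns.rdataclass, dns.rdatatype
#   rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.0.2.1')
#   assert rd.to_text() == '192.0.2.1'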
| 32.675381
| 78
| 0.62295
|
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')
def _escapify(qstring):
if isinstance(qstring, text_type):
qstring = qstring.encode()
if not isinstance(qstring, bytearray):
qstring = bytearray(qstring)
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + chr(c)
elif c >= 0x20 and c < 0x7F:
text += chr(c)
else:
text += '\\%03d' % c
return text
def _truncate_bitmap(what):
for i in xrange(len(what) - 1, -1, -1):
if what[i] != 0:
return what[0: i + 1]
return what[0:1]
class Rdata(object):
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
return dns.rdatatype.NONE
def extended_rdatatype(self):
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
raise NotImplementedError
def to_wire(self, file, compress=None, origin=None):
raise NotImplementedError
def to_digestable(self, origin=None):
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
if our > their:
return 1
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
raise NotImplementedError
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
raise NotImplementedError
def choose_relativity(self, origin=None, relativize=True):
pass
class GenericRdata(Rdata):
__slots__ = ['data']
def __init__(self, rdclass, rdtype, data):
super(GenericRdata, self).__init__(rdclass, rdtype)
self.data = data
def to_text(self, origin=None, relativize=True, **kw):
return r'\# %d ' % len(self.data) + _hexify(self.data)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
token = tok.get()
if not token.is_identifier() or token.value != r'\#':
raise dns.exception.SyntaxError(
r'generic rdata does not start with \#')
length = tok.get_int()
chunks = []
        while True:
token = tok.get()
if token.is_eol_or_eof():
break
chunks.append(token.value.encode())
hex = b''.join(chunks)
data = binascii.unhexlify(hex)
if len(data) != length:
raise dns.exception.SyntaxError(
'generic rdata hex data has wrong length')
return cls(rdclass, rdtype, data)
def to_wire(self, file, compress=None, origin=None):
file.write(self.data)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
return cls(rdclass, rdtype, wire[current: current + rdlen])
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
mod = _rdata_modules.get((rdclass, rdtype))
rdclass_text = dns.rdataclass.to_text(rdclass)
rdtype_text = dns.rdatatype.to_text(rdtype)
rdtype_text = rdtype_text.replace('-', '_')
if not mod:
        mod = _rdata_modules.get((dns.rdataclass.ANY, rdtype))
if not mod:
try:
mod = import_module('.'.join([_module_prefix,
rdclass_text, rdtype_text]))
_rdata_modules[(rdclass, rdtype)] = mod
except ImportError:
try:
mod = import_module('.'.join([_module_prefix,
'ANY', rdtype_text]))
_rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
except ImportError:
mod = None
if mod:
cls = getattr(mod, rdtype_text)
else:
cls = GenericRdata
return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
if isinstance(tok, string_types):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if cls != GenericRdata:
# peek at first token
token = tok.get()
tok.unget(token)
if token.is_identifier() and \
token.value == r'\#':
#
# Known type using the generic syntax. Extract the
# wire form from the generic syntax, and then run
# from_wire on it.
#
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
wire = dns.wiredata.maybe_wrap(wire)
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
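# Minimal usage sketch (editor's addition, not part of the original module;
# assumes the dnspython package this file belongs to):
#
#   import dns.rdata, dns.rdataclass, dns.rdatatype
#   a = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '10.0.0.1')
#   wire = a.to_digestable()
#   b = dns.rdata.from_wire(dns.rdataclass.IN, dns.rdatatype.A, wire, 0, len(wire))
#   assert a == b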
| true
| true
|
790c3966592b8849b5534e7e6be124a71cf4958c
| 1,558
|
py
|
Python
|
benchmark/benchmark_struct.py
|
JonnoFTW/thriftpy2
|
3b4d28d611a752f8796604ee274f03c517670a43
|
[
"MIT"
] | 491
|
2018-09-26T14:04:25.000Z
|
2022-03-31T10:34:43.000Z
|
benchmark/benchmark_struct.py
|
JonnoFTW/thriftpy2
|
3b4d28d611a752f8796604ee274f03c517670a43
|
[
"MIT"
] | 167
|
2018-09-26T15:56:53.000Z
|
2022-03-29T10:16:47.000Z
|
benchmark/benchmark_struct.py
|
JonnoFTW/thriftpy2
|
3b4d28d611a752f8796604ee274f03c517670a43
|
[
"MIT"
] | 82
|
2018-09-26T12:32:44.000Z
|
2022-03-09T08:57:20.000Z
|
import time
import thriftpy2
from thriftpy2.utils import serialize, deserialize
from thriftpy2.protocol import TBinaryProtocolFactory, TCyBinaryProtocolFactory
addressbook = thriftpy2.load("addressbook.thrift")
def make_addressbook():
phone1 = addressbook.PhoneNumber()
phone1.type = addressbook.PhoneType.MOBILE
phone1.number = b'555-1212'
phone2 = addressbook.PhoneNumber()
phone2.type = addressbook.PhoneType.HOME
phone2.number = b'555-1234'
person = addressbook.Person()
person.name = b"Alice"
person.phones = [phone1, phone2]
person.created_at = 1400000000
ab = addressbook.AddressBook()
ab.people = {person.name: person}
return ab
ab_encoded = serialize(make_addressbook())
def encode(n, proto_factory=TBinaryProtocolFactory()):
ab = make_addressbook()
start = time.time()
for i in range(n):
serialize(ab, proto_factory)
end = time.time()
print("encode\t-> {}".format(end - start))
def decode(n, proto_factory=TBinaryProtocolFactory()):
ab = addressbook.AddressBook()
start = time.time()
for i in range(n):
deserialize(ab, ab_encoded, proto_factory)
end = time.time()
print("decode\t-> {}".format(end - start))
def main():
n = 100000
print("binary protocol struct benchmark for {} times:".format(n))
encode(n)
decode(n)
print("\ncybin protocol struct benchmark for {} times:".format(n))
encode(n, TCyBinaryProtocolFactory())
decode(n, TCyBinaryProtocolFactory())
if __name__ == "__main__":
main()
| 25.966667
| 79
| 0.689345
|
import time
import thriftpy2
from thriftpy2.utils import serialize, deserialize
from thriftpy2.protocol import TBinaryProtocolFactory, TCyBinaryProtocolFactory
addressbook = thriftpy2.load("addressbook.thrift")
def make_addressbook():
phone1 = addressbook.PhoneNumber()
phone1.type = addressbook.PhoneType.MOBILE
phone1.number = b'555-1212'
phone2 = addressbook.PhoneNumber()
phone2.type = addressbook.PhoneType.HOME
phone2.number = b'555-1234'
person = addressbook.Person()
person.name = b"Alice"
person.phones = [phone1, phone2]
person.created_at = 1400000000
ab = addressbook.AddressBook()
ab.people = {person.name: person}
return ab
ab_encoded = serialize(make_addressbook())
def encode(n, proto_factory=TBinaryProtocolFactory()):
ab = make_addressbook()
start = time.time()
for i in range(n):
serialize(ab, proto_factory)
end = time.time()
print("encode\t-> {}".format(end - start))
def decode(n, proto_factory=TBinaryProtocolFactory()):
ab = addressbook.AddressBook()
start = time.time()
for i in range(n):
deserialize(ab, ab_encoded, proto_factory)
end = time.time()
print("decode\t-> {}".format(end - start))
def main():
n = 100000
print("binary protocol struct benchmark for {} times:".format(n))
encode(n)
decode(n)
print("\ncybin protocol struct benchmark for {} times:".format(n))
encode(n, TCyBinaryProtocolFactory())
decode(n, TCyBinaryProtocolFactory())
if __name__ == "__main__":
main()
| true
| true
|
790c3a2d91e74039c115f557f9aa9936d7ff5e74
| 2,623
|
py
|
Python
|
dct_image_transform/reflection.py
|
kanpurin/dctimagetransform
|
b5950945922e4eafc17bb88fd28dfe5167ca3529
|
[
"MIT"
] | null | null | null |
dct_image_transform/reflection.py
|
kanpurin/dctimagetransform
|
b5950945922e4eafc17bb88fd28dfe5167ca3529
|
[
"MIT"
] | null | null | null |
dct_image_transform/reflection.py
|
kanpurin/dctimagetransform
|
b5950945922e4eafc17bb88fd28dfe5167ca3529
|
[
"MIT"
] | null | null | null |
import numpy as np
from dct_image_transform.dct import dct2
def reflection(image,axis=0):
'''
    Reflect (mirror) an image that has been discrete-cosine transformed in
    8x8 blocks (a "DCT image" below).
    Parameters
    ----------
    image: 2-D array representing an image whose width and height are multiples
        of 8. Behavior is undefined if they are not multiples of 8.
    axis: axis along which to reflect. Default is `axis=0`.
    Returns
    -------
    A 2-D array holding the reflected DCT image. `image` itself is not modified.
Examples
--------
>>> import numpy as np
>>> a = np.arange(64).reshape((8,8))
>>> a
array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 53, 54, 55],
[56, 57, 58, 59, 60, 61, 62, 63]])
>>> dct_image_transform.reflection.reflection(a,axis=0)
array([[ 5.77395663e-15, 1.00000000e+00, 2.00000000e+00,
3.00000000e+00, 4.00000000e+00, 5.00000000e+00,
6.00000000e+00, 7.00000000e+00],
[-8.00000000e+00, -9.00000000e+00, -1.00000000e+01,
-1.10000000e+01, -1.20000000e+01, -1.30000000e+01,
-1.40000000e+01, -1.50000000e+01],
[ 1.60000000e+01, 1.70000000e+01, 1.80000000e+01,
1.90000000e+01, 2.00000000e+01, 2.10000000e+01,
2.20000000e+01, 2.30000000e+01],
[-2.40000000e+01, -2.50000000e+01, -2.60000000e+01,
-2.70000000e+01, -2.80000000e+01, -2.90000000e+01,
-3.00000000e+01, -3.10000000e+01],
[ 3.20000000e+01, 3.30000000e+01, 3.40000000e+01,
3.50000000e+01, 3.60000000e+01, 3.70000000e+01,
3.80000000e+01, 3.90000000e+01],
[-4.00000000e+01, -4.10000000e+01, -4.20000000e+01,
-4.30000000e+01, -4.40000000e+01, -4.50000000e+01,
-4.60000000e+01, -4.70000000e+01],
[ 4.80000000e+01, 4.90000000e+01, 5.00000000e+01,
5.10000000e+01, 5.20000000e+01, 5.30000000e+01,
5.40000000e+01, 5.50000000e+01],
[-5.60000000e+01, -5.70000000e+01, -5.80000000e+01,
-5.90000000e+01, -6.00000000e+01, -6.10000000e+01,
-6.20000000e+01, -6.30000000e+01]])
'''
    R = np.zeros((8,8),dtype=np.float64)  # np.float was removed from modern NumPy
for i in range(8):
R[i,7-i] = 1
R = dct2(R)
if axis == 0:
return np.vstack(list(map(lambda m:np.dot(R,m),np.flip(np.vsplit(image,range(8,image.shape[1],8)),0))))
elif axis == 1:
return np.hstack(list(map(lambda m:np.dot(m,R),np.flip(np.hsplit(image,range(8,image.shape[1],8)),0))))
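# Minimal usage sketch (editor's addition; `img` is a hypothetical 2-D array
# whose sides are multiples of 8, and dct2 is assumed to produce the 8x8
# block-DCT representation used throughout this package):
#
#   dct_img = dct2(img)
#   mirrored = reflection(dct_img, axis=0)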
| 40.984375
| 111
| 0.544034
|
import numpy as np
from dct_image_transform.dct import dct2
def reflection(image,axis=0):
    R = np.zeros((8,8),dtype=np.float64)
for i in range(8):
R[i,7-i] = 1
R = dct2(R)
if axis == 0:
return np.vstack(list(map(lambda m:np.dot(R,m),np.flip(np.vsplit(image,range(8,image.shape[1],8)),0))))
elif axis == 1:
return np.hstack(list(map(lambda m:np.dot(m,R),np.flip(np.hsplit(image,range(8,image.shape[1],8)),0))))
| true
| true
|
790c3ae8e8bf27683b6097baed91201afc4e0e47
| 2,324
|
py
|
Python
|
libs/bahan_ajar/bahan_ajar.py
|
hexatester/ut-telegram-bot
|
20f6f063726913cb6d21e42538103e3498b929a7
|
[
"MIT"
] | 3
|
2020-09-15T23:10:32.000Z
|
2021-01-23T18:17:34.000Z
|
libs/bahan_ajar/bahan_ajar.py
|
hexatester/ut-telegram-bot
|
20f6f063726913cb6d21e42538103e3498b929a7
|
[
"MIT"
] | null | null | null |
libs/bahan_ajar/bahan_ajar.py
|
hexatester/ut-telegram-bot
|
20f6f063726913cb6d21e42538103e3498b929a7
|
[
"MIT"
] | 1
|
2020-08-26T16:02:03.000Z
|
2020-08-26T16:02:03.000Z
|
import requests
from logging import getLogger
from bs4 import BeautifulSoup
from requests import Session
from typing import List
from urllib.parse import quote_plus
from .book import Book
from ..base import HEADERS
class BahanAjar:
def __init__(self, email: str, password: str, login: bool = True):
self.session: Session = Session()
self.session.headers.update(HEADERS)
self.email = email
self.password = password
self._my_books: List[Book] = []
self.logger = getLogger(self.__class__.__name__)
if login and self.login():
self.logger.debug("Berhasil login ke bahan ajar")
def login(self, email: str = None, password: str = None) -> bool:
try:
email = email if email else self.email
password = password if password else self.password
url = f"http://bahanajar.ut.ac.id/Homes/login_frame/{email}/{password}//////?service="
res = self.session.post(url)
return res.ok
except Exception as E:
self.logger.exception(E)
return False
@property
def my_books(self) -> List[Book]:
if self._my_books:
return self._my_books
url = "http://bahanajar.ut.ac.id/Homes/my_books"
res = self.session.get(url)
if not res.ok or "No books are available" in res.text:
return []
soup: BeautifulSoup = BeautifulSoup(res.text, "lxml")
soup = soup.find("div", id="bookHolder").find_all(
"div", class_="publib_bkthumb"
)
if not len(soup) > 0:
return []
else:
self._my_books = [Book.from_bkthumb(bktm) for bktm in soup]
return self._my_books
@my_books.deleter
def my_books(self):
del self._my_books
@staticmethod
def search(query: str, start: int = 0) -> List[Book]:
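        # NOTE (editor): `start` is currently unused; the URL below always
        # requests offset 0.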
url = f"http://bahanajar.ut.ac.id/ebookstore/ajax_load_search_books/0/{quote_plus(query)}"
res = requests.get(url)
if not res.ok:
return []
soup: BeautifulSoup = BeautifulSoup(res.text, "lxml")
soup = soup.find("div", class_="book_stnd").find_all("div", class_="newb_bg")
if not len(soup) > 0:
return []
return [Book.from_newb_bg(newb_bg) for newb_bg in soup]
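# Hypothetical usage sketch (editor's addition; credentials are placeholders):
#
#   ba = BahanAjar("user@example.com", "secret")
#   for book in ba.my_books:
#       print(book)
#   results = BahanAjar.search("kalkulus")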
| 35.212121
| 98
| 0.609725
|
import requests
from logging import getLogger
from bs4 import BeautifulSoup
from requests import Session
from typing import List
from urllib.parse import quote_plus
from .book import Book
from ..base import HEADERS
class BahanAjar:
def __init__(self, email: str, password: str, login: bool = True):
self.session: Session = Session()
self.session.headers.update(HEADERS)
self.email = email
self.password = password
self._my_books: List[Book] = []
self.logger = getLogger(self.__class__.__name__)
if login and self.login():
self.logger.debug("Berhasil login ke bahan ajar")
def login(self, email: str = None, password: str = None) -> bool:
try:
email = email if email else self.email
password = password if password else self.password
url = f"http://bahanajar.ut.ac.id/Homes/login_frame/{email}/{password}//////?service="
res = self.session.post(url)
return res.ok
except Exception as E:
self.logger.exception(E)
return False
@property
def my_books(self) -> List[Book]:
if self._my_books:
return self._my_books
url = "http://bahanajar.ut.ac.id/Homes/my_books"
res = self.session.get(url)
if not res.ok or "No books are available" in res.text:
return []
soup: BeautifulSoup = BeautifulSoup(res.text, "lxml")
soup = soup.find("div", id="bookHolder").find_all(
"div", class_="publib_bkthumb"
)
if not len(soup) > 0:
return []
else:
self._my_books = [Book.from_bkthumb(bktm) for bktm in soup]
return self._my_books
@my_books.deleter
def my_books(self):
del self._my_books
@staticmethod
def search(query: str, start: int = 0) -> List[Book]:
url = f"http://bahanajar.ut.ac.id/ebookstore/ajax_load_search_books/0/{quote_plus(query)}"
res = requests.get(url)
if not res.ok:
return []
soup: BeautifulSoup = BeautifulSoup(res.text, "lxml")
soup = soup.find("div", class_="book_stnd").find_all("div", class_="newb_bg")
if not len(soup) > 0:
return []
return [Book.from_newb_bg(newb_bg) for newb_bg in soup]
| true
| true
|
790c3b46e34aae8a0d1f2c0f61445f788fda2fe1
| 405
|
py
|
Python
|
mysite/ViralScreener/migrations/0009_auto_20200326_0339.py
|
memtech3/Viral-Screener
|
1940a7e097a9f5b7eb78f001c0a9623b398c020e
|
[
"MIT"
] | 1
|
2020-03-25T22:23:35.000Z
|
2020-03-25T22:23:35.000Z
|
mysite/ViralScreener/migrations/0009_auto_20200326_0339.py
|
memtech3/Viral-Screener
|
1940a7e097a9f5b7eb78f001c0a9623b398c020e
|
[
"MIT"
] | 7
|
2020-03-23T04:51:55.000Z
|
2020-03-25T22:23:14.000Z
|
mysite/ViralScreener/migrations/0009_auto_20200326_0339.py
|
memtech3/Viral-Screener
|
1940a7e097a9f5b7eb78f001c0a9623b398c020e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-03-26 03:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ViralScreener', '0008_auto_20200326_0338'),
]
operations = [
migrations.AlterField(
model_name='employeescreeningresponses',
name='DateTime',
field=models.DateTimeField(),
),
]
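# Applied with the standard Django command (editor's note; assumes the usual
# manage.py project layout):
#   python manage.py migrate ViralScreener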
| 21.315789
| 53
| 0.619753
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ViralScreener', '0008_auto_20200326_0338'),
]
operations = [
migrations.AlterField(
model_name='employeescreeningresponses',
name='DateTime',
field=models.DateTimeField(),
),
]
| true
| true
|
790c3ba506bf01e517ab768e5a2f46125b512fb2
| 3,384
|
py
|
Python
|
utils/plot.py
|
donghwijung/LoRCoN-LO
|
37d4f97d2ae01a2dca1d086579ca3efaab77553b
|
[
"MIT"
] | 3
|
2022-03-14T07:21:41.000Z
|
2022-03-25T11:21:23.000Z
|
utils/plot.py
|
donghwijung/LoRCoN-LO
|
37d4f97d2ae01a2dca1d086579ca3efaab77553b
|
[
"MIT"
] | null | null | null |
utils/plot.py
|
donghwijung/LoRCoN-LO
|
37d4f97d2ae01a2dca1d086579ca3efaab77553b
|
[
"MIT"
] | 1
|
2022-03-31T05:34:25.000Z
|
2022-03-31T05:34:25.000Z
|
import os
import numpy as np
import matplotlib.pyplot as plt
# import sys, os
# sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))
import process_data
import common
def plot_gt(Y_origin_data, pose_folder, preprocessed_folder, data_seqs, seq_sizes, dim="2d", save_graph=True, dataset="KITTI"):
start_idx = 0
end_idx = 0
additional_row = np.array([0, 0, 0, 1], dtype=np.float64)
for seq in data_seqs:
end_idx += seq_sizes[seq]
origin_poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4,4),dtype=np.float64)
for idx, row in enumerate(Y_origin_data[start_idx:end_idx]):
new_pose = np.array(list(map(float, row.strip().split(" "))), dtype=np.float64)
new_pose = np.concatenate((new_pose, additional_row))
new_pose = new_pose.reshape(4,4)
origin_poses[idx] = new_pose
fig = plt.figure(figsize=(10,10))
if dim == "2d":
plt.scatter(origin_poses[:,0,3],origin_poses[:,1,3], c=origin_poses[:,2,3], s=20, alpha=0.5)
else: # 3d
ax = fig.add_subplot(111, projection='3d')
ax.scatter(origin_poses[:,0,3],origin_poses[:,1,3],origin_poses[:,2,3],c=origin_poses[:,1,3], s=20, alpha=0.5)
if save_graph:
graph_folder = os.path.join('result', dataset, 'graph')
os.makedirs(graph_folder, exist_ok=True)
plt.savefig(os.path.join(graph_folder, f"gt_{seq}_{dim}.png"))
# plt.close(fig)
start_idx += seq_sizes[seq]
def plot_results(Y_origin_data, Y_estimated_data, data_seqs, rnn_size, seq_sizes, dim="2d", save_graph=True, dataset="KITTI"):
start_idx = 0
end_idx = 0
additional_row = np.array([0, 0, 0, 1], dtype=np.float64)
for i, seq in enumerate(data_seqs):
end_idx += seq_sizes[seq]
poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4,4),dtype=np.float64)
for idx in range(rnn_size):
current_pose = np.array(list(map(float, Y_origin_data[start_idx+idx].strip().split(" "))), dtype=np.float64)
current_pose = np.concatenate((current_pose, additional_row))
current_pose = current_pose.reshape(4,4)
poses[idx] = current_pose
for idx, relative_pose in enumerate(Y_estimated_data[start_idx-i*rnn_size:end_idx-(i+1)*rnn_size]):
rot_mat = common.euler_to_rot_mat(relative_pose[5],relative_pose[4],relative_pose[3])
trans_mat = np.identity(4)
trans_mat[:3,:3]=rot_mat
trans_mat[0,3]=relative_pose[0]
trans_mat[1,3]=relative_pose[1]
trans_mat[2,3]=relative_pose[2]
current_pose = np.dot(current_pose, trans_mat)
poses[idx + rnn_size] = current_pose
fig = plt.figure(figsize=(10,10))
if dim == "2d":
plt.scatter(poses[:,0,3],poses[:,1,3], c=poses[:,2,3], s=20, alpha=0.5)
else: # 3d
ax = fig.add_subplot(111, projection='3d')
ax.scatter(poses[:,0,3],poses[:,1,3],poses[:,2,3],c=poses[:,1,3], s=20, alpha=0.5)
if save_graph:
graph_folder = os.path.join('result', dataset, 'graph')
os.makedirs(graph_folder, exist_ok=True)
plt.savefig(os.path.join(graph_folder, f"est_{seq}_{dim}.png"))
# plt.close(fig)
start_idx += seq_sizes[seq]
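# Hypothetical usage sketch (editor's addition; sequence names, sizes and
# folders are placeholders, not taken from the original repo):
#
#   seq_sizes = {"00": 4541}
#   plot_gt(Y_origin_data, "poses/", "preprocessed/", ["00"], seq_sizes, dim="2d")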
| 44.526316
| 134
| 0.618794
|
import os
import numpy as np
import matplotlib.pyplot as plt
import process_data
import common
def plot_gt(Y_origin_data, pose_folder, preprocessed_folder, data_seqs, seq_sizes, dim="2d", save_graph=True, dataset="KITTI"):
start_idx = 0
end_idx = 0
additional_row = np.array([0, 0, 0, 1], dtype=np.float64)
for seq in data_seqs:
end_idx += seq_sizes[seq]
origin_poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4,4),dtype=np.float64)
for idx, row in enumerate(Y_origin_data[start_idx:end_idx]):
new_pose = np.array(list(map(float, row.strip().split(" "))), dtype=np.float64)
new_pose = np.concatenate((new_pose, additional_row))
new_pose = new_pose.reshape(4,4)
origin_poses[idx] = new_pose
fig = plt.figure(figsize=(10,10))
if dim == "2d":
plt.scatter(origin_poses[:,0,3],origin_poses[:,1,3], c=origin_poses[:,2,3], s=20, alpha=0.5)
else:
ax = fig.add_subplot(111, projection='3d')
ax.scatter(origin_poses[:,0,3],origin_poses[:,1,3],origin_poses[:,2,3],c=origin_poses[:,1,3], s=20, alpha=0.5)
if save_graph:
graph_folder = os.path.join('result', dataset, 'graph')
os.makedirs(graph_folder, exist_ok=True)
plt.savefig(os.path.join(graph_folder, f"gt_{seq}_{dim}.png"))
start_idx += seq_sizes[seq]
def plot_results(Y_origin_data, Y_estimated_data, data_seqs, rnn_size, seq_sizes, dim="2d", save_graph=True, dataset="KITTI"):
start_idx = 0
end_idx = 0
additional_row = np.array([0, 0, 0, 1], dtype=np.float64)
for i, seq in enumerate(data_seqs):
end_idx += seq_sizes[seq]
poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4,4),dtype=np.float64)
for idx in range(rnn_size):
current_pose = np.array(list(map(float, Y_origin_data[start_idx+idx].strip().split(" "))), dtype=np.float64)
current_pose = np.concatenate((current_pose, additional_row))
current_pose = current_pose.reshape(4,4)
poses[idx] = current_pose
for idx, relative_pose in enumerate(Y_estimated_data[start_idx-i*rnn_size:end_idx-(i+1)*rnn_size]):
rot_mat = common.euler_to_rot_mat(relative_pose[5],relative_pose[4],relative_pose[3])
trans_mat = np.identity(4)
trans_mat[:3,:3]=rot_mat
trans_mat[0,3]=relative_pose[0]
trans_mat[1,3]=relative_pose[1]
trans_mat[2,3]=relative_pose[2]
current_pose = np.dot(current_pose, trans_mat)
poses[idx + rnn_size] = current_pose
fig = plt.figure(figsize=(10,10))
if dim == "2d":
plt.scatter(poses[:,0,3],poses[:,1,3], c=poses[:,2,3], s=20, alpha=0.5)
else:
ax = fig.add_subplot(111, projection='3d')
ax.scatter(poses[:,0,3],poses[:,1,3],poses[:,2,3],c=poses[:,1,3], s=20, alpha=0.5)
if save_graph:
graph_folder = os.path.join('result', dataset, 'graph')
os.makedirs(graph_folder, exist_ok=True)
plt.savefig(os.path.join(graph_folder, f"est_{seq}_{dim}.png"))
start_idx += seq_sizes[seq]
| true
| true
|
790c3c6a22d26170d1307906c5f02c3b1ad1d748
| 4,145
|
py
|
Python
|
src/build_tools/change_reference_mac.py
|
dancerj/mozc
|
a5a4927c1f709d2ff0c681585c746f73a434e4c9
|
[
"BSD-3-Clause"
] | null | null | null |
src/build_tools/change_reference_mac.py
|
dancerj/mozc
|
a5a4927c1f709d2ff0c681585c746f73a434e4c9
|
[
"BSD-3-Clause"
] | 1
|
2021-06-30T14:59:51.000Z
|
2021-06-30T15:31:56.000Z
|
src/build_tools/change_reference_mac.py
|
dancerj/mozc
|
a5a4927c1f709d2ff0c681585c746f73a434e4c9
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T09:01:39.000Z
|
2022-03-25T09:01:39.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2010-2020, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Change the reference to frameworks.
Typical usage:
% change_reference_mac.py --qtdir=/path/to/qtdir/ \
--target=/path/to/target.app/Contents/MacOS/target
"""
__author__ = "horo"
import optparse
import os
from util import PrintErrorAndExit
from util import RunOrDie
def ParseOption():
"""Parse command line options."""
parser = optparse.OptionParser()
parser.add_option('--qtdir', dest='qtdir')
parser.add_option('--target', dest='target')
(opts, _) = parser.parse_args()
return opts
def GetFrameworkPath(name, version):
return '%s.framework/Versions/%s/%s' % (name, version, name)
def GetReferenceTo(framework):
return ('@executable_path/../../../ConfigDialog.app/Contents/Frameworks/%s' %
framework)
def InstallNameTool(target, reference_from, reference_to):
cmd = ['install_name_tool', '-change', reference_from, reference_to, target]
RunOrDie(cmd)
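# The rewritten install names can be verified afterwards with, e.g.
# (hypothetical path):
#   otool -L /path/to/target.app/Contents/MacOS/target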
def main():
opt = ParseOption()
if not opt.qtdir:
PrintErrorAndExit('--qtdir option is mandatory.')
if not opt.target:
PrintErrorAndExit('--target option is mandatory.')
unused_qtdir = os.path.abspath(opt.qtdir) # TODO(komatsu): remove this.
target = os.path.abspath(opt.target)
# Changes the reference to QtCore framework from the target application
# From: @rpath/QtCore.framework/Versions/5/QtCore
# To: @executable_path/../../../MozcTool.app/Contents/Frameworks/...
qtcore_framework = GetFrameworkPath('QtCore', '5')
InstallNameTool(target,
'@rpath/%s' % qtcore_framework,
GetReferenceTo(qtcore_framework))
# Changes the reference to QtGui framework from the target application
qtgui_framework = GetFrameworkPath('QtGui', '5')
InstallNameTool(target,
'@rpath/%s' % qtgui_framework,
GetReferenceTo(qtgui_framework))
# Changes the reference to QtWidgets framework from the target application
qtwidgets_framework = GetFrameworkPath('QtWidgets', '5')
InstallNameTool(target,
'@rpath/%s' % qtwidgets_framework,
GetReferenceTo(qtwidgets_framework))
# Change the reference to $(branding)Tool_lib from the target application
# From: @executable_path/../Frameworks/MozcTool_lib.framework/...
# To: @executable_path/../../../ConfigDialog.app/Contents/Frameworks/...
toollib_framework = GetFrameworkPath('GuiTool_lib', 'A')
InstallNameTool(target,
'@executable_path/../Frameworks/%s' % toollib_framework,
GetReferenceTo(toollib_framework))
if __name__ == '__main__':
main()
| 35.732759
| 79
| 0.723281
|
__author__ = "horo"
import optparse
import os
from util import PrintErrorAndExit
from util import RunOrDie
def ParseOption():
parser = optparse.OptionParser()
parser.add_option('--qtdir', dest='qtdir')
parser.add_option('--target', dest='target')
(opts, _) = parser.parse_args()
return opts
def GetFrameworkPath(name, version):
return '%s.framework/Versions/%s/%s' % (name, version, name)
def GetReferenceTo(framework):
return ('@executable_path/../../../ConfigDialog.app/Contents/Frameworks/%s' %
framework)
def InstallNameTool(target, reference_from, reference_to):
cmd = ['install_name_tool', '-change', reference_from, reference_to, target]
RunOrDie(cmd)
def main():
opt = ParseOption()
if not opt.qtdir:
PrintErrorAndExit('--qtdir option is mandatory.')
if not opt.target:
PrintErrorAndExit('--target option is mandatory.')
unused_qtdir = os.path.abspath(opt.qtdir)
target = os.path.abspath(opt.target)
qtcore_framework = GetFrameworkPath('QtCore', '5')
InstallNameTool(target,
'@rpath/%s' % qtcore_framework,
GetReferenceTo(qtcore_framework))
qtgui_framework = GetFrameworkPath('QtGui', '5')
InstallNameTool(target,
'@rpath/%s' % qtgui_framework,
GetReferenceTo(qtgui_framework))
qtwidgets_framework = GetFrameworkPath('QtWidgets', '5')
InstallNameTool(target,
'@rpath/%s' % qtwidgets_framework,
GetReferenceTo(qtwidgets_framework))
toollib_framework = GetFrameworkPath('GuiTool_lib', 'A')
InstallNameTool(target,
'@executable_path/../Frameworks/%s' % toollib_framework,
GetReferenceTo(toollib_framework))
if __name__ == '__main__':
main()
| true
| true
|
790c3d645e7c8342081080e3315a5ea07ed93f83
| 543
|
py
|
Python
|
homie/node/property/property_enum.py
|
dresber/HomieV3
|
05115f59f4e78230d09ca4e9bd7a12589a638a81
|
[
"MIT"
] | 4
|
2019-06-10T01:18:06.000Z
|
2021-12-28T03:00:48.000Z
|
homie/node/property/property_enum.py
|
dresber/HomieV3
|
05115f59f4e78230d09ca4e9bd7a12589a638a81
|
[
"MIT"
] | 9
|
2019-05-02T20:03:42.000Z
|
2020-01-15T03:41:38.000Z
|
homie/node/property/property_enum.py
|
dresber/HomieV3
|
05115f59f4e78230d09ca4e9bd7a12589a638a81
|
[
"MIT"
] | 7
|
2019-05-05T15:37:49.000Z
|
2020-04-02T10:33:50.000Z
|
import logging
from .property_base import Property_Base
logger = logging.getLogger(__name__)
class Property_Enum(Property_Base):
def __init__(self, node, id, name, settable=True, retained=True, qos=1, unit=None, data_type='enum', data_format=None, value=None, set_value=None):
assert(data_format)
super().__init__(node,id,name,settable,retained,qos,unit,data_type,data_format,value,set_value)
self.enum_list = data_format.split(',')
def validate_value(self, value):
return value in self.enum_list
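# Hypothetical usage sketch (editor's addition; assumes a Homie `node` object
# from this package):
#
#   prop = Property_Enum(node, "mode", "Mode", data_format="auto,heat,cool")
#   prop.validate_value("heat")   # -> True
#   prop.validate_value("fan")    # -> False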
| 31.941176
| 151
| 0.734807
|
import logging
from .property_base import Property_Base
logger = logging.getLogger(__name__)
class Property_Enum(Property_Base):
def __init__(self, node, id, name, settable=True, retained=True, qos=1, unit=None, data_type='enum', data_format=None, value=None, set_value=None):
assert(data_format)
super().__init__(node,id,name,settable,retained,qos,unit,data_type,data_format,value,set_value)
self.enum_list = data_format.split(',')
def validate_value(self, value):
return value in self.enum_list
| true
| true
|
790c3da2c595f5dff7ef6735b4ea35b53ba4e72a
| 13,629
|
py
|
Python
|
rcnn/symbol/symbol_resnet_modify.py
|
angelfish91/Faster-RCNN-MXnet011
|
211a8f03647935b9680800a92009c7d966a60b72
|
[
"Apache-2.0"
] | 4
|
2018-02-08T21:22:11.000Z
|
2020-06-23T02:51:59.000Z
|
rcnn/symbol/symbol_resnet_modify.py
|
angelfish91/Faster-RCNN-MXnet011
|
211a8f03647935b9680800a92009c7d966a60b72
|
[
"Apache-2.0"
] | null | null | null |
rcnn/symbol/symbol_resnet_modify.py
|
angelfish91/Faster-RCNN-MXnet011
|
211a8f03647935b9680800a92009c7d966a60b72
|
[
"Apache-2.0"
] | 2
|
2018-02-02T10:34:00.000Z
|
2019-12-19T02:44:05.000Z
|
import mxnet as mx
import proposal
import proposal_target
from rcnn.config import config
import focal_loss
eps = 2e-5
use_global_stats = True
workspace = 512
res_deps = {'50': (3, 4, 6, 3), '101': (3, 4, 23, 3), '152': (3, 8, 36, 3), '200': (3, 24, 36, 3)}
units = res_deps['101']
filter_list = [256, 512, 1024, 2048]
def residual_unit(data, num_filter, stride, dim_match, name):
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=stride, pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn3')
act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True,
workspace=workspace, name=name + '_conv3')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_sc')
sum = mx.sym.ElementWiseSum(*[conv3, shortcut], name=name + '_plus')
return sum
def get_resnet_conv(data):
# res1
data_bn = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='bn_data')
conv0 = mx.sym.Convolution(data=data_bn, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
bn0 = mx.sym.BatchNorm(data=conv0, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn0')
relu0 = mx.sym.Activation(data=bn0, act_type='relu', name='relu0')
pool0 = mx.symbol.Pooling(data=relu0, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name='pool0')
# res2
unit = residual_unit(data=pool0, num_filter=filter_list[0], stride=(1, 1), dim_match=False, name='stage1_unit1')
for i in range(2, units[0] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[0], stride=(1, 1), dim_match=True, name='stage1_unit%s' % i)
# res3
unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(2, 2), dim_match=False, name='stage2_unit1')
for i in range(2, units[1] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(1, 1), dim_match=True, name='stage2_unit%s' % i)
# res4
unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(2, 2), dim_match=False, name='stage3_unit1')
for i in range(2, units[2] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(1, 1), dim_match=True, name='stage3_unit%s' % i)
return unit
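# Editor's note: the returned stage-3 feature map has an effective stride of
# 16 relative to the input (2 from conv0, 2 from pool0, and 2 each from
# stages 2 and 3), which is what config.RPN_FEAT_STRIDE is typically set to.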
def get_resnet_train(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
gt_boxes = mx.symbol.Variable(name="gt_boxes")
rpn_label = mx.symbol.Variable(name='label')
rpn_bbox_target = mx.symbol.Variable(name='bbox_target')
rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight')
# shared convolutional layers
conv_feat = get_resnet_conv(data)
# RPN layers
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
# classification
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
# bounding box regression
rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / config.TRAIN.RPN_BATCH_SIZE)
# ROI proposal
rpn_cls_act = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
if config.TRAIN.CXX_PROPOSAL:
rois = mx.contrib.symbol.Proposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
# ROI proposal target
gt_boxes_reshape = mx.symbol.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target',
num_classes=num_classes, batch_images=config.TRAIN.BATCH_IMAGES,
batch_rois=config.TRAIN.BATCH_ROIS, fg_fraction=config.TRAIN.FG_FRACTION)
rois = group[0]
label = group[1]
bbox_target = group[2]
bbox_weight = group[3]
# Fast R-CNN
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# res5
unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1')
for i in range(2, units[3] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i)
bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
if config.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, \
num_reg_classes=num_classes, roi_per_img=config.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, \
bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, \
normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, \
data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS_OHEM)
elif config.TRAIN.ENABLE_FOCALLOSS:
#cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
cls_prob = mx.sym.Custom(op_type='FocalLoss', name='cls_prob', data=cls_score, labels=label, gamma= 2,alpha = 0.25)
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
else:
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
# reshape output
label = mx.symbol.Reshape(data=label, shape=(config.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_loss_reshape')
group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.symbol.BlockGrad(label)])
return group
def get_resnet_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
# shared convolutional layers
conv_feat = get_resnet_conv(data)
# RPN
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# ROI Proposal
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.symbol.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if config.TEST.CXX_PROPOSAL:
rois = mx.contrib.symbol.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
# Fast R-CNN
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# res5
unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1')
for i in range(2, units[3] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i)
bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
cls_prob = mx.symbol.softmax(name='cls_prob', data=cls_score)
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
# reshape output
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape')
# group output
group = mx.symbol.Group([rois, cls_prob, bbox_pred])
return group
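# Minimal sketch of how these builders are used (editor's addition; the call
# below is illustrative and not taken from the original repo):
#
#   sym = get_resnet_test()
#   print(sym.list_arguments()[:5])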
| 59
| 133
| 0.696163
|
import mxnet as mx
import proposal
import proposal_target
from rcnn.config import config
import focal_loss
eps = 2e-5
use_global_stats = True
workspace = 512
res_deps = {'50': (3, 4, 6, 3), '101': (3, 4, 23, 3), '152': (3, 8, 36, 3), '200': (3, 24, 36, 3)}
units = res_deps['101']
filter_list = [256, 512, 1024, 2048]
def residual_unit(data, num_filter, stride, dim_match, name):
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=stride, pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn3')
act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True,
workspace=workspace, name=name + '_conv3')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_sc')
sum = mx.sym.ElementWiseSum(*[conv3, shortcut], name=name + '_plus')
return sum
def get_resnet_conv(data):
data_bn = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='bn_data')
conv0 = mx.sym.Convolution(data=data_bn, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
bn0 = mx.sym.BatchNorm(data=conv0, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn0')
relu0 = mx.sym.Activation(data=bn0, act_type='relu', name='relu0')
pool0 = mx.symbol.Pooling(data=relu0, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name='pool0')
unit = residual_unit(data=pool0, num_filter=filter_list[0], stride=(1, 1), dim_match=False, name='stage1_unit1')
for i in range(2, units[0] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[0], stride=(1, 1), dim_match=True, name='stage1_unit%s' % i)
unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(2, 2), dim_match=False, name='stage2_unit1')
for i in range(2, units[1] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(1, 1), dim_match=True, name='stage2_unit%s' % i)
unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(2, 2), dim_match=False, name='stage3_unit1')
for i in range(2, units[2] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(1, 1), dim_match=True, name='stage3_unit%s' % i)
return unit
def get_resnet_train(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
gt_boxes = mx.symbol.Variable(name="gt_boxes")
rpn_label = mx.symbol.Variable(name='label')
rpn_bbox_target = mx.symbol.Variable(name='bbox_target')
rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight')
conv_feat = get_resnet_conv(data)
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / config.TRAIN.RPN_BATCH_SIZE)
rpn_cls_act = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
if config.TRAIN.CXX_PROPOSAL:
rois = mx.contrib.symbol.Proposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
gt_boxes_reshape = mx.symbol.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target',
num_classes=num_classes, batch_images=config.TRAIN.BATCH_IMAGES,
batch_rois=config.TRAIN.BATCH_ROIS, fg_fraction=config.TRAIN.FG_FRACTION)
rois = group[0]
label = group[1]
bbox_target = group[2]
bbox_weight = group[3]
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1')
for i in range(2, units[3] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i)
bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
if config.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, \
num_reg_classes=num_classes, roi_per_img=config.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, \
bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, \
normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, \
data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS_OHEM)
elif config.TRAIN.ENABLE_FOCALLOSS:
cls_prob = mx.sym.Custom(op_type='FocalLoss', name='cls_prob', data=cls_score, labels=label, gamma= 2,alpha = 0.25)
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
else:
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
label = mx.symbol.Reshape(data=label, shape=(config.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_loss_reshape')
group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.symbol.BlockGrad(label)])
return group
def get_resnet_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
conv_feat = get_resnet_conv(data)
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.symbol.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if config.TEST.CXX_PROPOSAL:
rois = mx.contrib.symbol.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1')
for i in range(2, units[3] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i)
bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
cls_prob = mx.symbol.softmax(name='cls_prob', data=cls_score)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape')
group = mx.symbol.Group([rois, cls_prob, bbox_pred])
return group
| true
| true
|
790c3ee492fc6d12bb41d69658d0f8e53245b9a4
| 6,930
|
py
|
Python
|
cryomem/cmtools/lib/old/sql_svjj_old2.py
|
bebaek/cryomem
|
088fba2568d10451adda51a068c15c8c2a73d9ce
|
[
"MIT"
] | 1
|
2018-09-16T12:29:04.000Z
|
2018-09-16T12:29:04.000Z
|
cryomem/cmtools/lib/old/sql_svjj_old2.py
|
bebaek/cryomem
|
088fba2568d10451adda51a068c15c8c2a73d9ce
|
[
"MIT"
] | null | null | null |
cryomem/cmtools/lib/old/sql_svjj_old2.py
|
bebaek/cryomem
|
088fba2568d10451adda51a068c15c8c2a73d9ce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
Manage database of SV JJ device fab/measurement parameters
File format: SQLite
Tables: barrier (dep structure), shape, josephson (measured params),
trend (fitted Jc, RnA, IcRn)
BB, 2015
"""
import sqlite3
# Restrict table or column name for security
def scrub(table_name):
return ''.join( chr for chr in table_name if chr.isalnum() or chr=='_' )
# Build string: 'column1=?,column2=?,...'
def assignform(collist):
s = ' '
for col in collist:
s += col + '=?,'
return s.rstrip(',')
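# e.g. assignform(['wafer', 'chip']) returns ' wafer=?,chip=?'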
class SVJJDB():
def __init__(self, filename='svjj.db'):
# Table structures
self.colnames = {
'barrier': ['wafer', 'chip', 'structure', 'fm1_name',
'fm1_thickness', 'fm2_name', 'fm2_thickness'],
'shape': ['wafer', 'chip', 'device', 'shape', 'dim1', 'dim2'],
'josephson': ['wafer', 'chip', 'device', 'temperature',
'ic_p', 'ic_ap', 'r_p', 'r_ap'],
#'trend': ['wafer', 'structure', 'jc_p', 'jc_ap',
# 'fm1_thickness', 'fm2_name', 'fm2_thickness'],
}
self.datatypes = {'wafer': 'string', 'chip':'string',
'structure': 'string','shape':'string', 'fm1_name': 'string',
'fm2_name': 'string', 'fm1_thickness': 'float',
'fm2_thickness': 'float', 'device': 'string', 'dim1': 'float',
'dim2': 'float', 'temperature': 'float', 'ic_p': 'float',
'ic_ap': 'float', 'r_p': 'float', 'r_ap': 'float'}
# Default values
self.val0 = {'wafer': 'B150323a', 'chip': '56',
'structure': 'Fe/Cu/Ni/Cu', 'shape':'ellipse', 'fm1_name': 'Fe',
'fm2_name': 'Ni', 'fm1_thickness': '1e-9',
'fm2_thickness': '2.4e-9', 'device': 'A01', 'dim1': '1e-6',
'dim2': '1e-6', 'temperature': 4, 'ic_p': '10e-6',
'ic_ap': '5e-6', 'r_p': '1', 'r_ap': '1'}
self.conn = sqlite3.connect(filename)
self.c = self.conn.cursor()
def create_tables(self):
# Create barrier structure table
self.c.execute('''CREATE TABLE barrier
(wafer text, chip text, structure text,
fm1_name text, fm1_thickness real,
fm2_name text, fm2_thickness real)''')
# Create device shape table
self.c.execute('''CREATE TABLE shape
(wafer text, chip text, device text,
shape text, dim1 real, dim2 real)''')
# Create josephson measurement result table
self.c.execute('''CREATE TABLE josephson
(wafer text, chip text, device text, temperature real,
ic_p real, ic_ap real, r_p real, r_ap real)''')
def close(self, save=True):
if save: self.conn.commit() # save
self.conn.close()
# Insert a row in barrier table
def insert_row(self, table, arg):
s1 = 'INSERT INTO %s VALUES ' % scrub(table)
s2 = '(' + '?,'*(len(arg)-1) + '?)'
self.c.execute(s1+s2, arg)
def print_table(self, table):
print(self.colnames[table])
for row in self.c.execute('SELECT * FROM %s'%scrub(table)):
print(row)
#def print_table(self, table, ordercol):
#for row in self.c.execute('SELECT * FROM %s ORDER BY ?'%scrub(table),\
# (ordercol,)):
def delete_row(self, table, args):
if table == 'barrier':
self.c.execute('DELETE FROM %s WHERE wafer=? AND chip=?'
% scrub(table), args)
elif table == 'shape' or table == 'josephson':
self.c.execute('DELETE FROM %s WHERE '
'wafer=? AND chip=? AND device=?' % scrub(table), args)
else:
print('No table name: %s' % table)
    def update_row(self, table, vals, **newassign):
        s1 = 'UPDATE %s' % scrub(table)
        s2 = ' SET' + assignform(self.colnames[table])
        # Build the WHERE clause from the keyword matches; the original line
        # referenced an undefined `matchcols`/`matchvals`.
        matchcols = [scrub(col) for col in newassign]
        matchvals = tuple(newassign.values())
        s3 = ' WHERE ' + ' AND '.join('%s=?' % col for col in matchcols)
        self.c.execute(s1 + s2 + s3, vals + matchvals)
# Derived class for interactive shell execution
class SVJJDBInteract(SVJJDB):
#def create_db(self, *arg):
# self.create_tables(*arg) # pass filename
def print(self, table):
self.print_table(table)
# Get inputs from argument or interactively
# Use val0 as default for interactive case
def input_param(self, key, val0='0', **kwargs):
interact = kwargs.get('interact', True)
datatype = kwargs.get('datatype', 'string')
if interact:
msg = input(key + '? [%s] '%str(val0))
if msg == '': msg = val0 # empty input means default
if datatype == 'string': val = msg
if datatype == 'int': val = int(msg)
if datatype == 'float': val = float(msg)
else:
val = val0
return val
def insert(self, table):
vals = ()
for col in self.colnames[table]:
vals = vals + (self.input_param(col, self.val0[col],\
datatype=self.datatypes[col], interact=True),)
self.insert_row(table, vals)
def delete(self, table, *args):
self.delete_row(table, args)
# *args = wafer, chip, [device]
def update(self, table, **newassign):
# Load previous values as default (val0)
# User input
vals = ()
for col in self.colnames[table]:
vals = vals + (self.input_param(col, self.val0[col],\
datatype=self.datatypes[col], interact=True),)
self.update_row(table, vals, **newassign)
# Pass on any SQL statement
def execsql(self, *cmd):
self.c.execute(cmd[0])
def args2kwargs(args):
l = []
for arg in args:
l += [arg.split('=')]
return dict(l)
# main shell interface (run SVJJDBInteract class)
def app(argv):
"""Execute in system shell
"""
    if len(argv) < 3:  # need at least <command> and <table>
        print("Usage: python %s <command> <table> [<column1>=<value1> [...]]\n"
              " <command>: print, insert, delete, or update\n"
              " <table>: barrier, shape, or josephson\n" % argv[0])
sys.exit(0)
db = SVJJDBInteract()
# Fixed arguments
funcname = argv[1]
table = argv[2]
# Convert extra to keyword arguments
kwargs = args2kwargs(argv[3:])
getattr(db, funcname)(table, **kwargs)
db.close()
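# Example shell invocations (editor's addition; table names follow the schema
# above):
#   python sql_svjj_old2.py print barrier
#   python sql_svjj_old2.py insert josephson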
# simple test run
def app2(argv):
db = SVJJDB()
#db.create_tables()
db.insert_row('barrier', ('B150413', '22', 'Fe/Cu/Ni/Cu', 'Fe', 1e-9,\
'Ni', 2.4e-9))
db.print_table('barrier', 'chip')
db.close()
if __name__ == '__main__':
import sys
print(sys.version)
app(sys.argv)
print('Bye!')
| 33.970588
| 80
| 0.540404
|
print('hello')
import sqlite3
def scrub(table_name):
return ''.join( chr for chr in table_name if chr.isalnum() or chr=='_' )
def assignform(collist):
s = ' '
for col in collist:
s += col + '=?,'
return s.rstrip(',')
class SVJJDB():
def __init__(self, filename='svjj.db'):
self.colnames = {
'barrier': ['wafer', 'chip', 'structure', 'fm1_name',
'fm1_thickness', 'fm2_name', 'fm2_thickness'],
'shape': ['wafer', 'chip', 'device', 'shape', 'dim1', 'dim2'],
'josephson': ['wafer', 'chip', 'device', 'temperature',
'ic_p', 'ic_ap', 'r_p', 'r_ap'],
}
self.datatypes = {'wafer': 'string', 'chip':'string',
'structure': 'string','shape':'string', 'fm1_name': 'string',
'fm2_name': 'string', 'fm1_thickness': 'float',
'fm2_thickness': 'float', 'device': 'string', 'dim1': 'float',
'dim2': 'float', 'temperature': 'float', 'ic_p': 'float',
'ic_ap': 'float', 'r_p': 'float', 'r_ap': 'float'}
self.val0 = {'wafer': 'B150323a', 'chip': '56',
'structure': 'Fe/Cu/Ni/Cu', 'shape':'ellipse', 'fm1_name': 'Fe',
'fm2_name': 'Ni', 'fm1_thickness': '1e-9',
'fm2_thickness': '2.4e-9', 'device': 'A01', 'dim1': '1e-6',
'dim2': '1e-6', 'temperature': 4, 'ic_p': '10e-6',
'ic_ap': '5e-6', 'r_p': '1', 'r_ap': '1'}
self.conn = sqlite3.connect(filename)
self.c = self.conn.cursor()
def create_tables(self):
self.c.execute('''CREATE TABLE barrier
(wafer text, chip text, structure text,
fm1_name text, fm1_thickness real,
fm2_name text, fm2_thickness real)''')
self.c.execute('''CREATE TABLE shape
(wafer text, chip text, device text,
shape text, dim1 real, dim2 real)''')
self.c.execute('''CREATE TABLE josephson
(wafer text, chip text, device text, temperature real,
ic_p real, ic_ap real, r_p real, r_ap real)''')
def close(self, save=True):
if save: self.conn.commit()
self.conn.close()
def insert_row(self, table, arg):
s1 = 'INSERT INTO %s VALUES ' % scrub(table)
s2 = '(' + '?,'*(len(arg)-1) + '?)'
self.c.execute(s1+s2, arg)
def print_table(self, table):
print(self.colnames[table])
for row in self.c.execute('SELECT * FROM %s'%scrub(table)):
print(row)
def delete_row(self, table, args):
if table == 'barrier':
self.c.execute('DELETE FROM %s WHERE wafer=? AND chip=?'
% scrub(table), args)
elif table == 'shape' or table == 'josephson':
self.c.execute('DELETE FROM %s WHERE '
'wafer=? AND chip=? AND device=?' % scrub(table), args)
else:
print('No table name: %s' % table)
    def update_row(self, table, vals, **newassign):
        matchcols = list(newassign.keys())
        matchvals = tuple(newassign.values())
        s1 = 'UPDATE %s' % scrub(table)
        s2 = ' SET' + assignform(self.colnames[table])
        s3 = ' WHERE' + assignform(matchcols)
        print(s1+s2+s3)
        self.c.execute(s1 + s2 + s3, vals + matchvals)
class SVJJDBInteract(SVJJDB):
    def print(self, table):
self.print_table(table)
def input_param(self, key, val0='0', **kwargs):
interact = kwargs.get('interact', True)
datatype = kwargs.get('datatype', 'string')
if interact:
msg = input(key + '? [%s] '%str(val0))
if msg == '': msg = val0
if datatype == 'string': val = msg
if datatype == 'int': val = int(msg)
if datatype == 'float': val = float(msg)
else:
val = val0
return val
def insert(self, table):
vals = ()
for col in self.colnames[table]:
vals = vals + (self.input_param(col, self.val0[col],\
datatype=self.datatypes[col], interact=True),)
self.insert_row(table, vals)
def delete(self, table, *args):
self.delete_row(table, args)
def update(self, table, **newassign):
vals = ()
for col in self.colnames[table]:
vals = vals + (self.input_param(col, self.val0[col],\
datatype=self.datatypes[col], interact=True),)
self.update_row(table, vals, **newassign)
def execsql(self, *cmd):
self.c.execute(cmd[0])
def args2kwargs(args):
l = []
for arg in args:
l += [arg.split('=')]
return dict(l)
def app(argv):
    if len(argv) < 3:
print("Usage: python %s <command> <table> [<column1>=<value1> [...]]\n"
" <command>: print, insert, delete, or edit\n"
" <table>: barrier, shape, or josephson\n" % argv[0])
sys.exit(0)
db = SVJJDBInteract()
funcname = argv[1]
table = argv[2]
kwargs = args2kwargs(argv[3:])
getattr(db, funcname)(table, **kwargs)
db.close()
def app2(argv):
db = SVJJDB()
db.insert_row('barrier', ('B150413', '22', 'Fe/Cu/Ni/Cu', 'Fe', 1e-9,\
'Ni', 2.4e-9))
    db.print_table('barrier')
db.close()
if __name__ == '__main__':
import sys
print(sys.version)
app(sys.argv)
print('Bye!')
| true
| true
|
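The SVJJDB class above never binds a table name as a SQL parameter (SQLite does not allow identifiers there); it interpolates a scrub()-sanitized name and binds only the values through "?" placeholders. A minimal runnable sketch of that pattern, using an in-memory database rather than svjj.db:

import sqlite3

def scrub(table_name):
    # keep only alphanumerics/underscores so the identifier is safe to interpolate
    return ''.join(ch for ch in table_name if ch.isalnum() or ch == '_')

conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE barrier (wafer text, chip text)')
row = ('B150413', '22')
sql = 'INSERT INTO %s VALUES (%s)' % (scrub('barrier'), ','.join('?' * len(row)))
c.execute(sql, row)            # values are bound, never string-formatted
conn.commit()
print(list(c.execute('SELECT * FROM barrier')))   # [('B150413', '22')]
conn.close()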
790c3f5ad6dc4ed22ca30ff6fbd0ee4526b37abe
| 2,796
|
py
|
Python
|
tests/st/ops/ascend/vector/test_expm1_001.py
|
tianjiashuo/akg
|
a9cbf642063fb1086a93e8bc6be6feb145689817
|
[
"Apache-2.0"
] | 286
|
2020-06-23T06:40:44.000Z
|
2022-03-30T01:27:49.000Z
|
tests/st/ops/ascend/vector/test_expm1_001.py
|
tianjiashuo/akg
|
a9cbf642063fb1086a93e8bc6be6feb145689817
|
[
"Apache-2.0"
] | 10
|
2020-07-31T03:26:59.000Z
|
2021-12-27T15:00:54.000Z
|
tests/st/ops/ascend/vector/test_expm1_001.py
|
tianjiashuo/akg
|
a9cbf642063fb1086a93e8bc6be6feb145689817
|
[
"Apache-2.0"
] | 30
|
2020-07-17T01:04:14.000Z
|
2021-12-27T14:05:19.000Z
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.expm1_run import expm1_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_expm1_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# testflag,opfuncname,testRunArgs, setdimArgs
("expm1_001", expm1_run, ([4, 3], 'float16')),
("expm1_002", expm1_run, ([4, 16], 'float32')),
("expm1_003", expm1_run, ([4, ], 'float16')),
("expm1_004", expm1_run, ([4, 3, 16], 'float16')),
("expm1_005", expm1_run, ([32, 1024], 'float32')),
]
self.testarg_rpc_cloud = [
# testflag,opfuncname,testRunArgs, setdimArgs
("expm1_006", expm1_run, ([4, 3], 'float16')),
("expm1_007", expm1_run, ([4, 3], 'float32')),
("expm1_008", expm1_run, ([4, ], 'float16')),
("expm1_009", expm1_run, ([4, 3, 16], 'float16')),
("expm1_010", expm1_run, ([32, 1024], 'float32')),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
"""
        run case.
:return:
"""
self.common_run(self.testarg)
def test_run_rpc_cloud(self):
"""
        run case.
:return:
"""
self.common_run(self.testarg_rpc_cloud)
def teardown(self):
"""
clean environment
:return:
"""
self._log.info("============= {0} Teardown============".format(self.casename))
return
| 32.511628
| 89
| 0.546853
|
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.expm1_run import expm1_run
| true
| true
|
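For context on the op under test: expm1(x) is e**x - 1 computed without the cancellation that the naive form suffers for small x. A plain-numpy sketch of the numerical difference (numpy stands in here only to illustrate the math; the cases above exercise the akg implementation):

import numpy as np

x = np.float32(1e-7)
print(np.exp(x) - np.float32(1.0))   # dominated by float32 rounding of exp(x)
print(np.expm1(x))                   # close to the true value, ~1e-07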
790c3fa145d078876a1db5d5bd7a2995aba1c169
| 1,527
|
py
|
Python
|
GLADalertTRASE/update_data/functions.py
|
scpyork/GladAlert
|
6b8622ee9ff5d53186e2d7225b856a8855fe1da8
|
[
"0BSD"
] | null | null | null |
GLADalertTRASE/update_data/functions.py
|
scpyork/GladAlert
|
6b8622ee9ff5d53186e2d7225b856a8855fe1da8
|
[
"0BSD"
] | 2
|
2021-01-28T20:05:32.000Z
|
2022-03-25T18:51:24.000Z
|
GLADalertTRASE/update_data/functions.py
|
scpyork/GladAlert
|
6b8622ee9ff5d53186e2d7225b856a8855fe1da8
|
[
"0BSD"
] | null | null | null |
from new_alerts import *
from PIL import Image # $ pip install pillow
from scipy import sparse
import numpy as np
import re
import os  # needed for os.popen / os.system below
Image.MAX_IMAGE_PIXELS = None
def download(keep,tempdir):
#print keep
#class rt:pass
name = keep.split('/')[-1]
area = re.findall(r'_(\d+[NESW\b])',name)
# current file position
    position = list(map(direction, area))  # list() so it can be indexed below
#print position
group = '|'.join((str(i) for i in position))
date = keep.split('/')[-2]
url = keep.replace('gs://','https://storage.cloud.google.com/')#+'?authuser=0'
#2
## copy / download the files into the temp directory
(os.popen('gsutil cp %s %s/%s >/dev/null 2>&1 && echo "Copied: %s" >> temp.log'%(keep,tempdir,name,keep))) #.read())
#print (os.popen('gsutil cp %s %s/%s'%(keep,tempdir,name)).read(), 'gsutil cp %s %s/%s'%(keep,tempdir,name))
## Read image pixels using pillow library
# >/dev/null 2>&1 && echo "Copied: %s" >> temp.log'
im = Image.open('%s/%s'%(tempdir,name))
## Image pixels to a sparse array
    data = sparse.coo_matrix(im, dtype=int)  # 2nd positional arg is shape, so pass dtype by keyword
## remove downloaded file
os.system('rm %s/%s'%(tempdir,name))
data = np.array([
data.row.astype(float)* 0.00025 + float(position[0]) ,
#/float(data.shape[0])*(float(position[2]-position[0]))+position[0],
data.col.astype(float)* 0.00025 + float(position[1]) ,
#/float(data.shape[1])*(float(position[3]-position[1]))+position[1],
data.data
])
#print data[:,0],data[:,-1], position
return data
| 29.941176
| 120
| 0.607728
|
from new_alerts import *
from PIL import Image
from scipy import sparse
import numpy as np
import re
import os
Image.MAX_IMAGE_PIXELS = None
def download(keep,tempdir):
name = keep.split('/')[-1]
area = re.findall(r'_(\d+[NESW\b])',name)
    position = list(map(direction, area))
group = '|'.join((str(i) for i in position))
date = keep.split('/')[-2]
url = keep.replace('gs://','https://storage.cloud.google.com/')
& echo "Copied: %s" >> temp.log'%(keep,tempdir,name,keep)))
ir,name))
## Image pixels to a sparse array
    data = sparse.coo_matrix(im, dtype=int)
## remove downloaded file
os.system('rm %s/%s'%(tempdir,name))
data = np.array([
data.row.astype(float)* 0.00025 + float(position[0]) ,
#/float(data.shape[0])*(float(position[2]-position[0]))+position[0],
data.col.astype(float)* 0.00025 + float(position[1]) ,
#/float(data.shape[1])*(float(position[3]-position[1]))+position[1],
data.data
])
#print data[:,0],data[:,-1], position
return data
| true
| true
|
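The core transformation in download() above is turning a mostly-empty raster into (lat, lon, value) triples: nonzero pixels come out of a sparse COO matrix, and the row/column indices are scaled by 0.00025 degrees per pixel from the tile origin. A self-contained sketch with made-up inputs (the 4x4 array and the origin are hypothetical):

import numpy as np
from scipy import sparse

pixels = np.zeros((4, 4), dtype=int)
pixels[1, 2] = 7            # one nonzero "alert" pixel
lat0, lon0 = 10.0, 20.0     # hypothetical tile origin

coo = sparse.coo_matrix(pixels)     # stores only the nonzero entries
triples = np.array([
    coo.row.astype(float) * 0.00025 + lat0,
    coo.col.astype(float) * 0.00025 + lon0,
    coo.data,
])
print(triples)   # [[10.00025], [20.0005], [7.]]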
790c40c901cad2eed429ebb86f5df573ea107ba3
| 8,916
|
py
|
Python
|
city_scrapers/spiders/chi_school_community_action_council.py
|
jim/documenters-aggregator
|
c619b5cb3c6eb093f60662749cd76717b182c40c
|
[
"MIT"
] | null | null | null |
city_scrapers/spiders/chi_school_community_action_council.py
|
jim/documenters-aggregator
|
c619b5cb3c6eb093f60662749cd76717b182c40c
|
[
"MIT"
] | null | null | null |
city_scrapers/spiders/chi_school_community_action_council.py
|
jim/documenters-aggregator
|
c619b5cb3c6eb093f60662749cd76717b182c40c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
from city_scrapers.spider import Spider
from datetime import datetime, timedelta
from dateutil.parser import parse as dateparse
import re
class Chi_school_community_action_councilSpider(Spider):
name = 'chi_school_community_action_council'
long_name = 'Chicago Public Schools Community Action Council'
allowed_domains = ['cps.edu']
start_urls = ['http://cps.edu/FACE/Pages/CAC.aspx']
def parse(self, response):
"""
`parse` should always `yield` a dict that follows the Event Schema
<https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
month_counter = datetime.today().month # Sets month counter to the current month, which is passed to parse_start
for x in range(12): # iterates through every month in the year after the current month
if month_counter > 12:
break
else:
for item in response.css("ul").css('li')[17:]:
try:
if item.css("strong").css("a::attr(href)").extract()[0] == 'http://www.humboldtparkportal.org/':
continue
except:
pass
data = {
'_type': 'event',
'name': self._parse_name(item),
'description': self._parse_description(item),
'classification': self._parse_classification(item),
'start_time': self._parse_start(item, month_counter),
'end_time': self._parse_end(item),
'timezone': self._parse_timezone(item),
'status': self._parse_status(item),
'all_day': self._parse_all_day(item),
'location': self._parse_location(item),
'sources': self._parse_sources(response),
'community_area' : self._parse_community_area(item)
}
data['id'] = self._generate_id(data)
data['end_time'] = data['start_time'] + timedelta(hours=3) #adds 3 hours to start time
yield data
month_counter += 1 # month counter is increased by 1 month with each iteration of the for loop
# self._parse_next(response) yields more responses to parse if necessary.
# uncomment to find a "next" url
# yield self._parse_next(response)
def _parse_community_area(self, item):
"""
Parse or generate community area.
"""
if len(item.css('li').css('strong::text').extract()) == 1:
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return community_name[0]
def _parse_name(self, item):
"""
Parse or generate event name.
"""
if len(item.css('li').css('strong::text').extract()) == 1:
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return community_name[0] + ' Community Action Council'
def _parse_description(self, item):
"""
Parse or generate event description.
"""
return "Community Action Councils, or CACs, consist of 25-30 voting members who are " \
"directly involved in developing a strategic plan for educational success within " \
"their communities. CAC members include parents; elected officials; faith-based " \
"institutions, health care and community-based organizations; Local School" \
" Council (LSC) members; business leaders; educators and school administrators; " \
"staff members from Chicago's Sister Agencies; community residents; " \
"and students. There are nine CACs across Chicago. Each works to empower the " \
"community they serve to lead the improvement of local quality education."
def _parse_classification(self, item):
"""
Parse or generate classification (e.g. public health, education, etc).
"""
return 'Education'
def _parse_start(self, item, month_counter):
"""
Parse start date and time.
Accepts month_counter as an argument from top level parse function to iterate through all months in the year.
"""
def parse_day(source):
'''Parses the source material and retrieves the day of the week that the meeting occurs.
'''
day_source = source[0]
day_regex = re.compile(r'[a-zA-Z]+day')
mo = day_regex.search(day_source)
return mo.group().lower()
def parse_time(source):
'''Parses the source material and retrieves the time that the meeting occurs.
'''
time_source = source[1]
time_regex = re.compile(r'(1[012]|[1-9]):[0-5][0-9](am|pm)')
mo = time_regex.search(time_source)
return mo.group()
def count_days(day, week_count):
            '''Because the source material gives meeting dates as a recurring
            schedule (e.g. "2nd Tuesday"), use the parsed weekday and the week
            count to find the matching calendar date in the given month.'''
today = datetime.today()
week_day = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'saturday': 5,
'sunday': 6}
week_counter = 0
for x in range(1, 31):
try:
current_date = datetime(today.year, month_counter, x) #uses month counter from top level parse func.
if current_date.weekday() == week_day[day]:
week_counter += 1
if week_counter == int(week_count):
return current_date
except ValueError as e: # will break loop if range exceeds the number of days in the month
break
def concat_date(meeting_date, time):
'''Combines the meeting date with the time the meeting occurs. Function return a datetime
object.
'''
return dateparse(
str(meeting_date.year) + '-' + str(meeting_date.month) + '-' + str(meeting_date.day) + ' ' + time)
def get_start(source):
'''Combines above defined parse_day, parse_time, count_days, and concat_date functions to get the start
date from the source. If a start time cannot be found the UNIX epoch date is returned.
'''
day = parse_day(source)
week_count = source[0].strip()[
0] # selects first character in the source, which is usually the week count
if week_count.isdigit():
time = parse_time(source)
meeting_date = count_days(day, week_count)
start = concat_date(meeting_date, time)
            else:
                start = datetime(1970, 1, 1)  # documented fallback: the UNIX epoch
            return start
source = item.css('li::text').extract()
return get_start(source)
def _parse_end(self, item):
"""
Parse end date and time.
"""
return 'Estimated 3 hours'
def _parse_timezone(self, item):
"""
Parse or generate timzone in tzinfo format.
"""
return 'America/Chicago'
def _parse_all_day(self, item):
"""
Parse or generate all-day status. Defaults to False.
"""
return False
def _parse_location(self, item):
"""
Parse or generate location. Latitude and longitude can be
left blank and will be geocoded later.
"""
source = item.css('li::text').extract()[1]
return {
'url': None,
'name': source[source.find("at")+2:source.find("(")].replace('the', ''),
'address': source[source.find("(")+1:source.find(")")],
'coordinates': {
'latitude': None,
'longitude': None,
},
}
def _parse_status(self, item):
"""
Parse or generate status of meeting. Can be one of:
* cancelled
* tentative
* confirmed
* passed
By default, return "tentative"
"""
        return 'tentative'
def _parse_sources(self, response):
"""
Parse or generate sources.
"""
return [{
'url': response.url,
'note': '',
}]
| 41.277778
| 120
| 0.549237
|
import scrapy
from city_scrapers.spider import Spider
from datetime import datetime, timedelta
from dateutil.parser import parse as dateparse
import re
class Chi_school_community_action_councilSpider(Spider):
name = 'chi_school_community_action_council'
long_name = 'Chicago Public Schools Community Action Council'
allowed_domains = ['cps.edu']
start_urls = ['http://cps.edu/FACE/Pages/CAC.aspx']
def parse(self, response):
month_counter = datetime.today().month
for x in range(12):
if month_counter > 12:
break
else:
for item in response.css("ul").css('li')[17:]:
try:
if item.css("strong").css("a::attr(href)").extract()[0] == 'http://www.humboldtparkportal.org/':
continue
except:
pass
data = {
'_type': 'event',
'name': self._parse_name(item),
'description': self._parse_description(item),
'classification': self._parse_classification(item),
'start_time': self._parse_start(item, month_counter),
'end_time': self._parse_end(item),
'timezone': self._parse_timezone(item),
'status': self._parse_status(item),
'all_day': self._parse_all_day(item),
'location': self._parse_location(item),
'sources': self._parse_sources(response),
'community_area' : self._parse_community_area(item)
}
data['id'] = self._generate_id(data)
data['end_time'] = data['start_time'] + timedelta(hours=3)
yield data
month_counter += 1
def _parse_community_area(self, item):
if len(item.css('li').css('strong::text').extract()) == 1:
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return community_name[0]
def _parse_name(self, item):
if len(item.css('li').css('strong::text').extract()) == 1:
community_name = item.css('li').css('strong::text').extract()
else:
community_name = item.css('li').css('strong').css('a::text').extract()
return community_name[0] + ' Community Action Council'
def _parse_description(self, item):
return "Community Action Councils, or CACs, consist of 25-30 voting members who are " \
"directly involved in developing a strategic plan for educational success within " \
"their communities. CAC members include parents; elected officials; faith-based " \
"institutions, health care and community-based organizations; Local School" \
" Council (LSC) members; business leaders; educators and school administrators; " \
"staff members from Chicago's Sister Agencies; community residents; " \
"and students. There are nine CACs across Chicago. Each works to empower the " \
"community they serve to lead the improvement of local quality education."
def _parse_classification(self, item):
return 'Education'
def _parse_start(self, item, month_counter):
def parse_day(source):
day_source = source[0]
day_regex = re.compile(r'[a-zA-Z]+day')
mo = day_regex.search(day_source)
return mo.group().lower()
def parse_time(source):
time_source = source[1]
time_regex = re.compile(r'(1[012]|[1-9]):[0-5][0-9](am|pm)')
mo = time_regex.search(time_source)
return mo.group()
def count_days(day, week_count):
today = datetime.today()
week_day = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4, 'saturday': 5,
'sunday': 6}
week_counter = 0
for x in range(1, 31):
try:
current_date = datetime(today.year, month_counter, x) #uses month counter from top level parse func.
if current_date.weekday() == week_day[day]:
week_counter += 1
if week_counter == int(week_count):
return current_date
except ValueError as e: # will break loop if range exceeds the number of days in the month
break
def concat_date(meeting_date, time):
return dateparse(
str(meeting_date.year) + '-' + str(meeting_date.month) + '-' + str(meeting_date.day) + ' ' + time)
def get_start(source):
day = parse_day(source)
week_count = source[0].strip()[
0] # selects first character in the source, which is usually the week count
if week_count.isdigit():
time = parse_time(source)
meeting_date = count_days(day, week_count)
start = concat_date(meeting_date, time)
            else:
                start = datetime(1970, 1, 1)
            return start
source = item.css('li::text').extract()
return get_start(source)
def _parse_end(self, item):
return 'Estimated 3 hours'
def _parse_timezone(self, item):
return 'America/Chicago'
def _parse_all_day(self, item):
return False
def _parse_location(self, item):
source = item.css('li::text').extract()[1]
return {
'url': None,
'name': source[source.find("at")+2:source.find("(")].replace('the', ''),
'address': source[source.find("(")+1:source.find(")")],
'coordinates': {
'latitude': None,
'longitude': None,
},
}
def _parse_status(self, item):
        return 'tentative'
def _parse_sources(self, response):
return [{
'url': response.url,
'note': '',
}]
| true
| true
|
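count_days() in the spider above is the classic "n-th weekday of the month" computation: walk days 1..31, count matches, stop at the n-th. A standalone sketch of the same logic (the year/month below are arbitrary examples):

from datetime import datetime

WEEKDAYS = {'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3,
            'friday': 4, 'saturday': 5, 'sunday': 6}

def nth_weekday(year, month, day_name, n):
    hits = 0
    for day in range(1, 32):
        try:
            date = datetime(year, month, day)
        except ValueError:          # walked past the end of the month
            return None
        if date.weekday() == WEEKDAYS[day_name]:
            hits += 1
            if hits == n:
                return date
    return None

print(nth_weekday(2018, 7, 'tuesday', 2))   # 2018-07-10 00:00:00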
790c4119de50c7acd948ff5ea64262bcdd31709e
| 742
|
py
|
Python
|
Lab01/lab01_03.py
|
micu01/ProgAlgo
|
fae21f563656c0d2b9d378db67e22f907486170f
|
[
"MIT"
] | 3
|
2020-01-02T10:31:42.000Z
|
2020-01-16T10:49:36.000Z
|
Lab01/lab01_03.py
|
micu01/ProgAlgo
|
fae21f563656c0d2b9d378db67e22f907486170f
|
[
"MIT"
] | null | null | null |
Lab01/lab01_03.py
|
micu01/ProgAlgo
|
fae21f563656c0d2b9d378db67e22f907486170f
|
[
"MIT"
] | null | null | null |
# A craftsman has to tile the entire floor of a rectangular kitchen of
# size L_1 x L_2 centimeters with square tiles, all of the same size.
# Knowing that he does not want to cut any tile and wants to use the
# minimum number of tiles, determine the tile size he needs as well as
# how many tiles. For example, if L_1=440 cm and L_2=280 cm, the
# craftsman needs 77 tiles, each with a 40 cm side.
L_1 = int(input('L_1: '))
L_2 = int(input('L_2: '))
aria = L_1*L_2
# find the gcd of the two sides (subtraction form of Euclid's algorithm)
while L_1 != L_2:
if L_1 > L_2:
L_1 -= L_2
else:
L_2 -= L_1
dim = L_1
nr = aria // dim**2  # the tile count is an integer
print(dim, nr)
| 32.26087
| 81
| 0.708895
|
L_1 = int(input('L_1: '))
L_2 = int(input('L_2: '))
aria = L_1*L_2
while L_1 != L_2:
if L_1 > L_2:
L_1 -= L_2
else:
L_2 -= L_1
dim = L_1
nr = aria // dim**2
print(dim, nr)
| true
| true
|
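The while-loop in the solution above is the subtraction form of Euclid's algorithm: the tile side is gcd(L_1, L_2), and the count follows by integer division. The same computation with the standard library:

from math import gcd

L_1, L_2 = 440, 280
dim = gcd(L_1, L_2)            # largest square tile that fits both sides evenly
nr = (L_1 * L_2) // dim**2     # number of tiles
print(dim, nr)                 # 40 77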
790c423038cefcdab34e20895a1e697946a8c00c
| 47,403
|
py
|
Python
|
python/words.py
|
flapperleenie/thevocals
|
8f7f38c1dd1b39a4b811c26d5487fe4d6430cd28
|
[
"MIT"
] | null | null | null |
python/words.py
|
flapperleenie/thevocals
|
8f7f38c1dd1b39a4b811c26d5487fe4d6430cd28
|
[
"MIT"
] | null | null | null |
python/words.py
|
flapperleenie/thevocals
|
8f7f38c1dd1b39a4b811c26d5487fe4d6430cd28
|
[
"MIT"
] | null | null | null |
parts = ["aardvark", "abacus", "abbey", "abdomen", "ability", "abolishment", "abroad", "accelerant", "accelerator", "accident", "accompanist", "accordion", "account", "accountant", "achieve", "achiever", "acid", "acknowledgment", "acoustic", "acoustics", "acrylic", "act", "action", "active", "activity", "actor", "actress", "acupuncture", "ad", "adapter", "addiction", "addition", "address", "adjustment", "administration", "adrenalin", "adult", "advancement", "advantage", "advertisement", "advertising", "advice", "affair", "affect", "afghanistan", "africa", "aftermath", "afternoon", "aftershave", "aftershock", "afterthought", "age", "agency", "agenda", "agent", "aglet", "agreement", "air", "airbag", "airbus", "airfare", "airforce", "airline", "airmail", "airplane", "airport", "airship", "alarm", "alb", "albatross", "alcohol", "alcove", "alder", "algebra", "algeria", "alibi", "allergist", "alley", "alligator", "alloy", "almanac", "almond", "alpaca", "alpenglow", "alpenhorn", "alpha", "alphabet", "alternative", "altitude", "alto", "aluminium", "aluminum", "ambassador", "ambition", "ambulance", "amendment", "america", "amount", "amusement", "anagram", "analgesia", "analog", "analysis", "analyst", "anatomy", "anesthesiology", "anethesiologist", "anger", "angiosperm", "angle", "angora", "angstrom", "anguish", "animal", "anime", "ankle", "anklet", "annual", "anorak", "answer", "ant", "antarctica", "anteater", "antechamber", "antelope", "anthony", "anthropology", "antler", "anxiety", "anybody", "anything", "anywhere", "apartment", "ape", "aperitif", "apology", "apparatus", "apparel", "appeal", "appearance", "appendix", "apple", "applewood", "appliance", "application", "appointment", "approval", "april", "apron", "apse", "aquarius", "aquifer", "arch", "archaeology", "archeology", "archer", "architect", "architecture", "arch-rival", "area", "argentina", "argument", "aries", "arithmetic", "arm", "armadillo", "armament", "armchair", "armoire", "armor", "arm-rest", "army", "arrival", "arrow", "art", "artichoke", "article", "artificer", "ascot", "ash", "ashram", "ashtray", "asia", "asparagus", "aspect", "asphalt", "assignment", "assistance", "assistant", "associate", "association", "assumption", "asterisk", "astrakhan", "astrolabe", "astrologer", "astrology", "astronomy", "atelier", "athelete", "athlete", "atm", "atmosphere", "atom", "atrium", "attachment", "attack", "attempt", "attendant", "attention", "attenuation", "attic", "attitude", "attorney", "attraction", "audience", "auditorium", "august", "aunt", "australia", "author", "authorisation", "authority", "authorization", "automaton", "avalanche", "avenue", "average", "awareness", "azimuth", "babe", "babies", "baboon", "babushka", "baby", "back", "backbone", "backdrop", "backpack", "bacon", "bad", "badge", "badger", "bafflement", "bag", "bagel", "bagpipe", "bagpipes", "bail", "bait", "bake", "baker", "bakery", "bakeware", "balaclava", "balalaika", "balance", "balcony", "balinese", "ball", "balloon", "ballpark", "bamboo", "banana", "band", "bandana", "bandanna", "bandolier", "bangladesh", "bangle", "banjo", "bank", "bankbook", "banker", "banquette", "baobab", "bar", "barbara", "barbeque", "barber", "barbiturate", "barge", "baritone", "barium", "barn", "barometer", "barracks", "barstool", "base", "baseball", "basement", "basin", "basis", "basket", "basketball", "bass", "bassinet", "bassoon", "bat", "bath", "bather", "bathhouse", "bathrobe", "bathroom", "bathtub", "batter", "battery", "batting", "battle", "battleship", "bay", "bayou", "beach", "bead", 
"beak", "beam", "bean", "beanie", "beanstalk", "bear", "beard", "beast", "beat", "beautician", "beauty", "beaver", "bed", "bedroom", "bee", "beech", "beef", "beer", "beet", "beetle", "beggar", "beginner", "begonia", "behavior", "beheading", "behest", "belfry", "belief", "believe", "bell", "belligerency", "bellows", "belly", "belt", "bench", "bend", "beneficiary", "benefit", "bengal", "beret", "berry", "bestseller", "best-seller", "betty", "beverage", "beyond", "bibliography", "bicycle", "bid", "bidet", "bifocals", "big", "big-rig", "bijou", "bike", "bikini", "bill", "billboard", "bin", "biology", "biplane", "birch", "bird", "birdbath", "birdcage", "birdhouse", "bird-watcher", "birth", "birthday", "bit", "bite", "black", "blackberry", "blackboard", "blackfish", "bladder", "blade", "blame", "blank", "blanket", "blazer", "blight", "blinker", "blister", "blizzard", "block", "blocker", "blood", "bloodflow", "bloom", "bloomers", "blossom", "blouse", "blow", "blowgun", "blowhole", "blue", "blueberry", "boar", "board", "boat", "boat-building", "boatload", "boatyard", "bobcat", "body", "bog", "bolero", "bolt", "bomb", "bomber", "bondsman", "bone", "bongo", "bonnet", "bonsai", "bonus", "boogeyman", "book", "bookcase", "bookend", "booklet", "booster", "boot", "bootee", "bootie", "boots", "booty", "border", "bore", "bosom", "botany", "bottle", "bottling", "bottom", "bottom-line", "boudoir", "bough", "boundary", "bow", "bower", "bowl", "bowler", "bowling", "bowtie", "box", "boxer", "boxspring", "boy", "boyfriend", "bra", "brace", "bracelet", "bracket", "brain", "brake", "branch", "brand", "brandy", "brass", "brassiere", "bratwurst", "brazil", "bread", "breadcrumb", "break", "breakfast", "breakpoint", "breast", "breastplate", "breath", "breeze", "bribery", "brick", "bricklaying", "bridge", "brief", "briefs", "brilliant", "british", "broccoli", "brochure", "broiler", "broker", "brome", "bronchitis", "bronco", "bronze", "brooch", "brood", "brook", "broom", "brother", "brother-in-law", "brow", "brown", "brush", "brushfire", "brushing", "bubble", "bucket", "buckle", "bud", "budget", "buffer", "buffet", "bug", "buggy", "bugle", "building", "bulb", "bull", "bulldozer", "bullet", "bull-fighter", "bumper", "bun", "bunch", "bungalow", "bunghole", "bunkhouse", "burglar", "burlesque", "burma", "burn", "burn-out", "burst", "bus", "bush", "business", "bust", "bustle", "butane", "butcher", "butter", "button", "buy", "buyer", "buzzard", "cabana", "cabbage", "cabin", "cabinet", "cable", "caboose", "cacao", "cactus", "caddy", "cadet", "cafe", "caftan", "cake", "calcification", "calculation", "calculator", "calculus", "calendar", "calf", "calico", "call", "calm", "camel", "cameo", "camera", "camp", "campaign", "campanile", "can", "canada", "canal", "cancel", "cancer", "candelabra", "candidate", "candle", "candy", "cane", "cannon", "canoe", "canon", "canopy", "canteen", "canvas", "cap", "cape", "capital", "capitulation", "capon", "cappelletti", "cappuccino", "capricorn", "captain", "caption", "car", "caravan", "carbon", "card", "cardboard", "cardigan", "care", "cargo", "carload", "carnation", "carol", "carotene", "carp", "carpenter", "carpet", "carport", "carriage", "carrier", "carrot", "carry", "cart", "cartilage", "cartload", "cartoon", "cartridge", "cascade", "case", "casement", "cash", "cashier", "casino", "casserole", "cassock", "cast", "castanet", "castanets", "castle", "cat", "catacomb", "catamaran", "category", "caterpillar", "cathedral", "catsup", "cattle", "cauliflower", "cause", "caution", "cave", "c-clamp", 
"cd", "ceiling", "celebration", "celeriac", "celery", "celeste", "cell", "cellar", "cello", "celsius", "cement", "cemetery", "cenotaph", "census", "cent", "centenarian", "center", "centimeter", "centurion", "century", "cephalopod", "ceramic", "cereal", "certification", "cesspool", "chador", "chafe", "chain", "chainstay", "chair", "chairlift", "chairman", "chairperson", "chairwoman", "chaise", "chalet", "chalice", "chalk", "champion", "championship", "chance", "chandelier", "change", "channel", "chap", "chapel", "chapter", "character", "chard", "charge", "charity", "charlatan", "charles", "charm", "chart", "chastity", "chasuble", "chateau", "chauffeur", "chauvinist", "check", "checkroom", "cheek", "cheese", "cheetah", "chef", "chemistry", "cheque", "cherries", "cherry", "chess", "chest", "chick", "chicken", "chicory", "chief", "chiffonier", "child", "childhood", "children", "chill", "chime", "chimpanzee", "chin", "china", "chinese", "chino", "chipmunk", "chit-chat", "chivalry", "chive", "chocolate", "choice", "choker", "chop", "chopstick", "chord", "chowder", "christmas", "christopher", "chrome", "chromolithograph", "chronograph", "chronometer", "chub", "chug", "church", "churn", "cicada", "cigarette", "cinema", "circle", "circulation", "circumference", "cirrus", "citizenship", "city", "civilisation", "clam", "clank", "clapboard", "clarinet", "clasp", "class", "classroom", "claus", "clave", "clavicle", "clavier", "cleaner", "cleat", "cleavage", "clef", "cleric", "clerk", "click", "client", "cliff", "climate", "climb", "clip", "clipper", "cloak", "cloakroom", "clock", "clockwork", "clogs", "cloister", "close", "closet", "cloth", "clothes", "clothing", "cloud", "cloudburst", "cloudy", "clove", "clover", "club", "clutch", "coach", "coal", "coast", "coat", "cob", "cobweb", "cockpit", "cockroach", "cocktail", "cocoa", "cod", "codon", "codpiece", "coevolution", "coffee", "coffin", "coil", "coin", "coinsurance", "coke", "cold", "coliseum", "collar", "collection", "college", "collision", "colloquia", "colombia", "colon", "colonisation", "colony", "color", "colt", "column", "columnist", "comb", "combat", "combination", "comfort", "comfortable", "comic", "comma", "command", "commercial", "commission", "committee", "communicant", "communication", "community", "company", "comparison", "competition", "competitor", "complaint", "complement", "complex", "component", "comportment", "composer", "composition", "compost", "compulsion", "computer", "comradeship", "concept", "concert", "conclusion", "concrete", "condition", "condominium", "condor", "conductor", "cone", "confectionery", "conference", "confidence", "confirmation", "conflict", "confusion", "conga", "congo", "congressman", "congressperson", "congresswoman", "conifer", "connection", "consent", "consequence", "console", "consonant", "conspirator", "constant", "constellation", "construction", "consul", "consulate", "contact lens", "contagion", "contest", "context", "continent", "contract", "contrail", "contrary", "contribution", "control", "convection", "conversation", "convert", "convertible", "cook", "cookie", "cooking", "coonskin", "cope", "cop-out", "copper", "co-producer", "copy", "copyright", "copywriter", "cord", "corduroy", "cork", "cormorant", "corn", "cornerstone", "cornet", "corral", "correspondent", "corridor", "corsage", "cost", "costume", "cot", "cottage", "cotton", "couch", "cougar", "cough", "council", "councilman", "councilor", "councilperson", "councilwoman", "counter", "counter-force", "countess", "country", "county", "couple", 
"courage", "course", "court", "cousin", "covariate", "cover", "coverall", "cow", "cowbell", "cowboy", "crab", "crack", "cracker", "crackers", "cradle", "craftsman", "crash", "crate", "cravat", "craw", "crawdad", "crayfish", "crayon", "cream", "creative", "creator", "creature", "creche", "credenza", "credit", "creditor", "creek", "creme brulee", "crest", "crew", "crib", "cribbage", "cricket", "cricketer", "crime", "criminal", "crinoline", "criteria", "criterion", "criticism", "crocodile", "crocus", "croissant", "crook", "crop", "cross", "cross-contamination", "cross-stitch", "crotch", "croup", "crow", "crowd", "crown", "crude", "crush", "cry", "crystallography", "cub", "cuban", "cuckoo", "cucumber", "cuff-links", "cultivar", "cultivator", "culture", "culvert", "cummerbund", "cup", "cupboard", "cupcake", "cupola", "curio", "curl", "curler", "currency", "current", "cursor", "curtain", "curve", "cushion", "custard", "custodian", "customer", "cut", "cuticle", "cutlet", "cutover", "cutting", "cyclamen", "cycle", "cyclone", "cylinder", "cymbal", "cymbals", "cynic", "cyst", "cytoplasm", "dad", "daffodil", "dagger", "dahlia", "daisy", "damage", "dame", "dance", "dancer", "danger", "daniel", "dark", "dart", "dash", "dashboard", "data", "database", "date", "daughter", "david", "day", "daybed", "dead", "deadline", "deal", "dealer", "dear", "death", "deathwatch", "deborah", "debt", "debtor", "decade", "december", "decimal", "decision", "deck", "declination", "decongestant", "decrease", "decryption", "dedication", "deer", "defense", "deficit", "definition", "deformation", "degree", "delete", "delivery", "demand", "demur", "den", "denim", "dentist", "deodorant", "department", "departure", "dependent", "deployment", "deposit", "depression", "depressive", "depth", "deputy", "derby", "derrick", "description", "desert", "design", "designer", "desire", "desk", "dessert", "destiny", "destroyer", "destruction", "detail", "detainment", "detective", "detention", "determination", "development", "deviance", "device", "dew", "dhow", "diadem", "diamond", "diaphragm", "diarist", "dibble", "dickey", "dictaphone", "diction", "dictionary", "diet", "dietician", "difference", "differential", "difficulty", "digestion", "digger", "digital", "dilapidation", "dill", "dime", "dimension", "dimple", "diner", "dinghy", "dinner", "dinosaur", "diploma", "dipstick", "direction", "director", "dirndl", "dirt", "disadvantage", "disarmament", "disaster", "disco", "disconnection", "discount", "discovery", "discrepancy", "discussion", "disease", "disembodiment", "disengagement", "disguise", "disgust", "dish", "dishes", "dishwasher", "disk", "display", "disposer", "distance", "distribution", "distributor", "district", "divan", "diver", "divide", "divider", "diving", "division", "dock", "doctor", "document", "doe", "dog", "dogsled", "dogwood", "doll", "dollar", "dolman", "dolphin", "domain", "donald", "donkey", "donna", "door", "doorknob", "doorpost", "dorothy", "dory", "dot", "double", "doubling", "doubt", "doubter", "downforce", "downgrade", "downtown", "draft", "dragon", "dragonfly", "dragster", "drain", "drake", "drama", "dramaturge", "draw", "drawbridge", "drawer", "drawing", "dream", "dredger", "dress", "dresser", "dressing", "drill", "drink", "drive", "driver", "driveway", "driving", "drizzle", "dromedary", "drop", "drug", "drum", "drummer", "drunk", "dry", "dryer", "duck", "duckling", "dud", "duffel", "dugout", "dulcimer", "dumbwaiter", "dump truck", "dune buggy", "dungarees", "dungeon", "duplexer", "dust", "dust storm", "duster", 
"duty", "dwarf", "dwelling", "dynamo", "eagle", "ear", "eardrum", "earmuffs", "earplug", "earrings", "earth", "earthquake", "earthworm", "ease", "easel", "east", "eave", "eavesdropper", "e-book", "ecclesia", "eclipse", "ecliptic", "economics", "ecumenist", "eddy", "edge", "edger", "editor", "editorial", "education", "edward", "eel", "effacement", "effect", "effective", "efficacy", "efficiency", "effort", "egg", "egghead", "eggnog", "eggplant", "egypt", "eight", "ejector", "elbow", "election", "electrocardiogram", "element", "elephant", "elevator", "elixir", "elizabeth", "elk", "ellipse", "elm", "elongation", "embossing", "emergence", "emergent", "emery", "emotion", "emphasis", "employ", "employee", "employer", "employment", "empowerment", "emu", "encirclement", "encyclopedia", "end", "endothelium", "enemy", "energy", "engine", "engineer", "engineering", "english", "enigma", "enquiry", "entertainment", "enthusiasm", "entrance", "entry", "environment", "epauliere", "epee", "ephemera", "ephemeris", "epoch", "eponym", "epoxy", "equinox", "equipment", "era", "e-reader", "error", "escape", "espadrille", "espalier", "establishment", "estate", "estimate", "estrogen", "estuary", "ethernet", "ethiopia", "euphonium", "eurocentrism", "europe", "evaluator", "evening", "evening-wear", "event", "eviction", "evidence", "evocation", "exam", "examination", "examiner", "example", "exchange", "excitement", "exclamation", "excuse", "executor", "exhaust", "ex-husband", "exile", "existence", "exit", "expansion", "expansionism", "experience", "expert", "explanation", "exposition", "expression", "extension", "extent", "extreme", "ex-wife", "eye", "eyeball", "eyebrow", "eyebrows", "eyeglasses", "eyelash", "eyelashes", "eyelid", "eyelids", "eyeliner", "eyestrain", "face", "facelift", "facet", "facilities", "facsimile", "fact", "factor", "factory", "faculty", "fahrenheit", "failure", "fairies", "fairy", "fall", "falling-out", "familiar", "family", "fan", "fang", "fanlight", "fanny", "fanny-pack", "farm", "farmer", "fascia", "fat", "father", "father-in-law", "fatigues", "faucet", "fault", "fawn", "fax", "fear", "feast", "feather", "feature", "february", "fedelini", "fedora", "feed", "feedback", "feeling", "feet", "felony", "female", "fen", "fence", "fencing", "fender", "ferry", "ferryboat", "fertilizer", "few", "fiber", "fiberglass", "fibre", "fiction", "fiddle", "field", "fifth", "fight", "fighter", "figurine", "file", "fill", "filly", "filth", "final", "finance", "find", "finding", "fine", "finger", "fingernail", "finisher", "fir", "fire", "fireman", "fireplace", "firewall", "fish", "fishbone", "fisherman", "fishery", "fishing", "fishmonger", "fishnet", "fisting", "fix", "fixture", "flag", "flame", "flanker", "flare", "flash", "flat", "flatboat", "flavor", "flax", "fleck", "fleece", "flesh", "flight", "flintlock", "flip-flops", "flock", "flood", "floor", "floozie", "flower", "flu", "flugelhorn", "fluke", "flute", "fly", "flytrap", "foam", "fob", "focus", "fog", "fold", "folder", "fondue", "font", "food", "foot", "football", "footnote", "footrest", "foot-rest", "footstool", "foray", "force", "forearm", "forebear", "forecast", "forehead", "forest", "forestry", "forgery", "fork", "form", "formal", "format", "former", "fort", "fortnight", "fortress", "fortune", "forum", "foundation", "fountain", "fowl", "fox", "foxglove", "fragrance", "frame", "france", "fratricide", "fraudster", "frazzle", "freckle", "freedom", "freeplay", "freeze", "freezer", "freight", "freighter", "french", "freon", "fresco", "friction", "friday", 
"fridge", "friend", "friendship", "frigate", "fringe", "frock", "frog", "front", "frost", "frown", "fruit", "frustration", "fuel", "fulfillment", "full", "function", "fundraising", "funeral", "funny", "fur", "furnace", "furniture", "fusarium", "futon", "future", "gaffer", "gaiters", "gale", "gall-bladder", "galleon", "gallery", "galley", "gallon", "galoshes", "game", "gamebird", "gamma-ray", "gander", "gap", "garage", "garb", "garbage", "garden", "garlic", "garment", "garter", "gas", "gasoline", "gastropod", "gate", "gateway", "gather", "gauge", "gauntlet", "gazebo", "gazelle", "gear", "gearshift", "geese", "gelding", "gem", "gemini", "gemsbok", "gender", "gene", "general", "genetics", "geography", "geology", "geometry", "george", "geranium", "gerbil", "geriatrician", "german", "germany", "geyser", "ghana", "gherkin", "ghost", "giant", "gigantism", "ginseng", "giraffe", "girdle", "girl", "girlfriend", "git", "glad", "gladiolus", "gland", "glass", "glasses", "glen", "glider", "gliding", "glockenspiel", "glove", "gloves", "glue", "glut", "goal", "goat", "gobbler", "godmother", "goggles", "go-kart", "gold", "goldfish", "golf", "gondola", "gong", "good", "goodbye", "good-bye", "goodie", "goose", "gopher", "gore-tex", "gorilla", "gosling", "governance", "government", "governor", "gown", "grab-bag", "grade", "grain", "gram", "granddaughter", "grandfather", "grandmom", "grandmother", "grandson", "granny", "grape", "grapefruit", "graph", "graphic", "grass", "grasshopper", "grassland", "gray", "grease", "great", "great-grandfather", "great-grandmother", "greece", "greek", "green", "greenhouse", "grenade", "grey", "grief", "grill", "grip", "grit", "grocery", "ground", "group", "grouper", "grouse", "growth", "guarantee", "guatemalan", "guest", "guestbook", "guidance", "guide", "guilty", "guitar", "guitarist", "gum", "gumshoes", "gun", "gutter", "guy", "gym", "gymnast", "gynaecology", "gyro", "hacienda", "hacksaw", "hackwork", "hail", "hair", "haircut", "half", "half-brother", "half-sister", "halibut", "hall", "hallway", "hamaki", "hamburger", "hammer", "hammock", "hamster", "hand", "handball", "hand-holding", "handicap", "handle", "handlebar", "handmaiden", "handsaw", "hang", "harbor", "harbour", "hardboard", "hardcover", "hardening", "hardhat", "hard-hat", "hardware", "harm", "harmonica", "harmony", "harp", "harpooner", "harpsichord", "hassock", "hat", "hatbox", "hatchet", "hate", "haunt", "haversack", "hawk", "hay", "head", "headlight", "headline", "headrest", "health", "hearing", "heart", "heartache", "hearth", "hearthside", "heart-throb", "heartwood", "heat", "heater", "heaven", "heavy", "hedge", "hedgehog", "heel", "height", "heirloom", "helen", "helicopter", "helium", "hell", "hellcat", "helmet", "helo", "help", "hemp", "hen", "herb", "heron", "herring", "hexagon", "heyday", "hide", "high", "highlight", "high-rise", "highway", "hill", "himalayan", "hip", "hippodrome", "hippopotamus", "historian", "history", "hit", "hive", "hobbies", "hobbit", "hobby", "hockey", "hoe", "hog", "hold", "hole", "holiday", "home", "homework", "homogenate", "homonym", "honey", "honeybee", "honoree", "hood", "hoof", "hook", "hope", "hops", "horn", "hornet", "horse", "hose", "hosiery", "hospice", "hospital", "host", "hostel", "hostess", "hot", "hot-dog", "hotel", "hour", "hourglass", "house", "houseboat", "housing", "hovel", "hovercraft", "howitzer", "hub", "hubcap", "hugger", "human", "humidity", "humor", "hunger", "hurdler", "hurricane", "hurry", "hurt", "husband", "hut", "hutch", "hyacinth", "hybridisation", 
"hydrant", "hydraulics", "hydrofoil", "hydrogen", "hyena", "hygienic", "hyphenation", "hypochondria", "hypothermia", "ice", "icebreaker", "icecream", "ice-cream", "icicle", "icon", "idea", "ideal", "igloo", "ikebana", "illegal", "image", "imagination", "impact", "implement", "importance", "impress", "impression", "imprisonment", "improvement", "impudence", "impulse", "inbox", "incandescence", "inch", "income", "increase", "independence", "independent", "index", "india", "indication", "indigence", "indonesia", "industry", "infancy", "inflammation", "inflation", "information", "infusion", "inglenook", "ingrate", "initial", "initiative", "in-joke", "injury", "ink", "in-laws", "inlay", "inn", "innervation", "innocent", "input", "inquiry", "inscription", "insect", "inside", "insolence", "inspection", "inspector", "instance", "instruction", "instrument", "instrumentalist", "instrumentation", "insulation", "insurance", "insurgence", "intelligence", "intention", "interaction", "interactive", "interest", "interferometer", "interior", "interloper", "internal", "internet", "interpreter", "intervenor", "interview", "interviewer", "intestine", "intestines", "introduction", "invention", "inventor", "inventory", "investment", "invite", "invoice", "iPad", "iran", "iraq", "iridescence", "iris", "iron", "ironclad", "island", "israel", "issue", "italy", "jackal", "jacket", "jaguar", "jail", "jailhouse", "jam", "james", "january", "japan", "japanese", "jar", "jasmine", "jason", "jaw", "jeans", "jeep", "jeff", "jelly", "jellyfish", "jennifer", "jet", "jewel", "jewelry", "jiffy", "job", "jockey", "jodhpurs", "joey", "jogging", "john", "join", "joke", "joseph", "jot", "journey", "judge", "judgment", "judo", "juggernaut", "juice", "july", "jumbo", "jump", "jumper", "jumpsuit", "june", "junior", "junk", "junker", "junket", "jury", "justice", "jute", "kale", "kamikaze", "kangaroo", "karate", "karen", "kayak", "kazoo", "kendo", "kenneth", "kenya", "ketch", "ketchup", "kettle", "kettledrum", "kevin", "key", "keyboard", "keyboarding", "keystone", "kick", "kick-off", "kid", "kidney", "kidneys", "kielbasa", "kill", "kilogram", "kilometer", "kilt", "kimberly", "kimono", "kind", "king", "kingfish", "kiosk", "kiss", "kitchen", "kite", "kitten", "kitty", "kleenex", "klomps", "knee", "kneejerk", "knickers", "knife", "knife-edge", "knight", "knitting", "knot", "knowledge", "knuckle", "koala", "kohlrabi", "korean", "lab", "laborer", "lace", "lacquerware", "ladder", "lady", "ladybug", "lake", "lamb", "lamp", "lan", "lanai", "land", "landform", "landmine", "language", "lantern", "lap", "laparoscope", "lapdog", "laptop", "larch", "larder", "lark", "laryngitis", "lasagna", "latency", "latex", "lathe", "latte", "laugh", "laundry", "laura", "law", "lawn", "lawsuit", "lawyer", "layer", "lead", "leader", "leadership", "leaf", "league", "leaker", "learning", "leash", "leather", "leaver", "lecture", "leek", "leg", "legal", "legging", "legume", "lei", "lemon", "lemonade", "lemur", "length", "lentil", "leo", "leopard", "leotard", "leprosy", "let", "letter", "lettuce", "level", "lever", "leverage", "libra", "librarian", "library", "license", "lier", "life", "lift", "light", "lighting", "lightning", "lilac", "lily", "limit", "limo", "line", "linen", "liner", "link", "linseed", "lion", "lip", "lipstick", "liquid", "liquor", "lisa", "list", "literature", "litigation", "litter", "liver", "living", "lizard", "llama", "loaf", "loafer", "loan", "lobotomy", "lobster", "location", "lock", "locker", "locket", "locomotive", "locust", "loft", "log", 
"loggia", "loincloth", "look", "loss", "lot", "lotion", "lounge", "lout", "love", "low", "loyalty", "luck", "luggage", "lumber", "lumberman", "lunch", "luncheonette", "lunchroom", "lung", "lunge", "lute", "luttuce", "lycra", "lye", "lymphocyte", "lynx", "lyocell", "lyre", "lyric", "macadamia", "macaroni", "machine", "macrame", "macrofauna", "maelstrom", "maestro", "magazine", "magic", "magician", "maid", "maiden", "mail", "mailbox", "mailman", "maintenance", "major", "major-league", "makeup", "malaysia", "male", "mall", "mallet", "mambo", "mammoth", "man", "management", "manager", "mandarin", "mandolin", "mangrove", "manhunt", "maniac", "manicure", "manner", "manor", "mansard", "manservant", "mansion", "mantel", "mantle", "mantua", "manufacturer", "manx", "map", "maple", "maraca", "maracas", "marble", "march", "mare", "margaret", "margin", "maria", "mariachi", "marimba", "mark", "market", "marketing", "marksman", "marriage", "marsh", "marshland", "marxism", "mary", "mascara", "mask", "mass", "massage", "master", "mastication", "mastoid", "mat", "match", "material", "math", "mattock", "mattress", "maximum", "may", "maybe", "mayonnaise", "mayor", "meal", "meaning", "measure", "measurement", "meat", "mechanic", "media", "medicine", "medium", "meet", "meeting", "megalomaniac", "melody", "member", "membership", "memory", "men", "menorah", "mention", "menu", "mercury", "mess", "message", "metal", "metallurgist", "meteor", "meteorology", "meter", "methane", "method", "methodology", "metro", "metronome", "mexican", "mexico", "mezzanine", "mice", "michael", "michelle", "microlending", "microwave", "mid-course", "middle", "middleman", "midi", "midline", "midnight", "midwife", "might", "migrant", "mile", "milk", "milkshake", "millennium", "millimeter", "millisecond", "mime", "mimosa", "mind", "mine", "mini", "minibus", "minion", "mini-skirt", "minister", "minor", "minor-league", "mint", "minute", "mirror", "miscarriage", "miscommunication", "misfit", "misogyny", "misplacement", "misreading", "missile", "mission", "mist", "mistake", "mister", "miter", "mitten", "mix", "mixer", "mixture", "moat", "mobile", "moccasins", "mocha", "mode", "model", "modem", "mole", "mom", "moment", "monastery", "monasticism", "monday", "money", "monger", "monitor", "monkey", "monocle", "monotheism", "monsoon", "monster", "month", "mood", "moon", "moonscape", "moonshine", "mop", "Mormon", "morning", "morocco", "morsel", "mortise", "mosque", "mosquito", "most", "motel", "moth", "mother", "mother-in-law", "motion", "motor", "motorboat", "motorcar", "motorcycle", "mound", "mountain", "mouse", "mouser", "mousse", "moustache", "mouth", "mouton", "move", "mover", "movie", "mower", "mud", "mug", "mukluk", "mule", "multimedia", "muscle", "musculature", "museum", "music", "music-box", "musician", "music-making", "mustache", "mustard", "mutt", "myanmar", "mycoplasma", "nail", "name", "naming", "nancy", "nanoparticle", "napkin", "narcissus", "nation", "naturalisation", "nature", "neat", "neck", "necklace", "necktie", "necromancer", "need", "needle", "negligee", "negotiation", "neologism", "neon", "nepal", "nephew", "nerve", "nest", "net", "netball", "netbook", "netsuke", "network", "neurobiologist", "neuropathologist", "neuropsychiatry", "news", "newspaper", "newsprint", "newsstand", "nexus", "nic", "nicety", "niche", "nickel", "niece", "nigeria", "night", "nightclub", "nightgown", "nightingale", "nightlight", "nitrogen", "node", "noise", "nonbeliever", "nonconformist", "nondisclosure", "noodle", "normal", "norse", "north", "north 
america", "north korea", "nose", "note", "notebook", "notice", "notify", "notoriety", "nougat", "novel", "november", "nudge", "number", "numeracy", "numeric", "numismatist", "nurse", "nursery", "nurture", "nut", "nylon", "oak", "oar", "oasis", "oatmeal", "obi", "objective", "obligation", "oboe", "observation", "observatory", "occasion", "occupation", "ocean", "ocelot", "octagon", "octave", "octavo", "octet", "october", "octopus", "odometer", "oeuvre", "offence", "offer", "office", "official", "off-ramp", "oil", "okra", "oldie", "olive", "omega", "omelet", "oncology", "one", "onion", "open", "opening", "opera", "operation", "ophthalmologist", "opinion", "opium", "opossum", "opportunist", "opportunity", "opposite", "option", "orange", "orangutan", "orator", "orchard", "orchestra", "orchid", "order", "ordinary", "ordination", "organ", "organisation", "organization", "original", "ornament", "osmosis", "osprey", "ostrich", "others", "otter", "ottoman", "ounce", "outback", "outcome", "outfit", "outhouse", "outlay", "output", "outrigger", "outset", "outside", "oval", "ovary", "oven", "overcharge", "overclocking", "overcoat", "overexertion", "overflight", "overnighter", "overshoot", "owl", "owner", "ox", "oxen", "oxford", "oxygen", "oyster", "pacemaker", "pack", "package", "packet", "pad", "paddle", "paddock", "page", "pagoda", "pail", "pain", "paint", "painter", "painting", "paintwork", "pair", "pajama", "pajamas", "pakistan", "paleontologist", "paleontology", "palm", "pamphlet", "pan", "pancake", "pancreas", "panda", "panic", "pannier", "panpipe", "pansy", "panther", "panties", "pantry", "pants", "pantsuit", "panty", "pantyhose", "paper", "paperback", "parable", "parachute", "parade", "parallelogram", "paramedic", "parcel", "parchment", "parent", "parentheses", "park", "parka", "parrot", "parsnip", "part", "participant", "particle", "particular", "partner", "partridge", "party", "passage", "passbook", "passenger", "passion", "passive", "pasta", "paste", "pastor", "pastoralist", "pastry", "patch", "path", "patience", "patient", "patina", "patio", "patriarch", "patricia", "patrimony", "patriot", "patrol", "pattern", "paul", "pavement", "pavilion", "paw", "pawnshop", "payee", "payment", "pea", "peace", "peach", "peacoat", "peacock", "peak", "peanut", "pear", "pearl", "pedal", "pedestrian", "pediatrician", "peen", "peer", "peer-to-peer", "pegboard", "pelican", "pelt", "pen", "penalty", "pencil", "pendant", "pendulum", "penicillin", "pension", "pentagon", "peony", "people", "pepper", "percentage", "perception", "perch", "performance", "perfume", "period", "periodical", "peripheral", "permafrost", "permission", "permit", "perp", "person", "personality", "perspective", "peru", "pest", "pet", "petal", "petticoat", "pew", "pharmacist", "pharmacopoeia", "phase", "pheasant", "philippines", "philosopher", "philosophy", "phone", "photo", "photographer", "phrase", "physical", "physician", "physics", "pianist", "piano", "piccolo", "pick", "pickax", "picket", "pickle", "picture", "pie", "piece", "pier", "piety", "pig", "pigeon", "pike", "pile", "pilgrimage", "pillbox", "pillow", "pilot", "pimp", "pimple", "pin", "pinafore", "pince-nez", "pine", "pineapple", "pinecone", "ping", "pink", "pinkie", "pinstripe", "pint", "pinto", "pinworm", "pioneer", "pipe", "piracy", "piranha", "pisces", "piss", "pitch", "pitching", "pith", "pizza", "place", "plain", "plane", "planet", "plant", "plantation", "planter", "plaster", "plasterboard", "plastic", "plate", "platform", "platinum", "platypus", "play", "player", "playground", 
"playroom", "pleasure", "pleated", "plier", "plot", "plough", "plover", "plow", "plowman", "plume", "plunger", "plywood", "pneumonia", "pocket", "pocketbook", "pocket-watch", "poem", "poet", "poetry", "poignance", "point", "poison", "poisoning", "poland", "pole", "polenta", "police", "policeman", "policy", "polish", "politician", "politics", "pollution", "polo", "polyester", "pompom", "poncho", "pond", "pony", "poof", "pool", "popcorn", "poppy", "popsicle", "population", "populist", "porch", "porcupine", "port", "porter", "portfolio", "porthole", "position", "positive", "possession", "possibility", "postage", "postbox", "poster", "pot", "potato", "potential", "potty", "pouch", "poultry", "pound", "pounding", "powder", "power", "precedent", "precipitation", "preface", "preference", "prelude", "premeditation", "premier", "preoccupation", "preparation", "presence", "presentation", "president", "pressroom", "pressure", "pressurisation", "price", "pride", "priest", "priesthood", "primary", "primate", "prince", "princess", "principal", "print", "printer", "priority", "prison", "prize", "prizefight", "probation", "problem", "procedure", "process", "processing", "produce", "producer", "product", "production", "profession", "professional", "professor", "profit", "program", "project", "promotion", "prompt", "proof-reader", "propane", "property", "proposal", "prose", "prosecution", "protection", "protest", "protocol", "prow", "pruner", "pseudoscience", "psychiatrist", "psychoanalyst", "psychologist", "psychology", "ptarmigan", "publisher", "pudding", "puddle", "puffin", "pull", "pulley", "puma", "pump", "pumpkin", "pumpkinseed", "punch", "punishment", "pupa", "pupil", "puppy", "purchase", "puritan", "purple", "purpose", "purse", "push", "pusher", "put", "pvc", "pyjama", "pyramid", "quadrant", "quail", "quality", "quantity", "quart", "quarter", "quartz", "queen", "question", "quicksand", "quiet", "quill", "quilt", "quince", "quit", "quiver", "quotation", "rabbi", "rabbit", "raccoon", "race", "racer", "racing", "racist", "rack", "radar", "radiator", "radio", "radiosonde", "radish", "raffle", "raft", "rag", "rage", "rail", "railway", "raiment", "rain", "rainbow", "raincoat", "rainmaker", "rainstorm", "raise", "rake", "ram", "rambler", "ramie", "ranch", "random", "randomisation", "range", "rank", "raspberry", "rat", "rate", "ratio", "raven", "ravioli", "raw", "rawhide", "ray", "rayon", "reactant", "reaction", "read", "reading", "reality", "reamer", "rear", "reason", "receipt", "reception", "recess", "recipe", "recliner", "recognition", "recommendation", "record", "recorder", "recording", "recover", "recruit", "rectangle", "red", "redesign", "rediscovery", "reduction", "reef", "refectory", "reflection", "refrigerator", "refund", "refuse", "region", "register", "regret", "regular", "regulation", "reindeer", "reinscription", "reject", "relation", "relationship", "relative", "religion", "relish", "reminder", "rent", "repair", "reparation", "repeat", "replace", "replacement", "replication", "reply", "report", "representative", "reprocessing", "republic", "reputation", "request", "requirement", "resale", "research", "resident", "resist", "resolution", "resource", "respect", "respite", "response", "responsibility", "rest", "restaurant", "result", "retailer", "rethinking", "retina", "retouch", "return", "reveal", "revenant", "revenue", "review", "revolution", "revolve", "revolver", "reward", "rheumatism", "rhinoceros", "rhyme", "rhythm", "rice", "richard", "riddle", "ride", "rider", "ridge", "rifle", "right", 
"rim", "ring", "ringworm", "ripple", "rise", "riser", "risk", "river", "riverbed", "rivulet", "road", "roadway", "roast", "robe", "robert", "robin", "rock", "rocker", "rocket", "rocket-ship", "rod", "role", "roll", "roller", "romania", "ronald", "roof", "room", "rooster", "root", "rope", "rose", "rostrum", "rotate", "roundabout", "route", "router", "routine", "row", "rowboat", "royal", "rub", "rubber", "rubric", "ruckus", "ruffle", "rugby", "rule", "run", "runaway", "runner", "russia", "rutabaga", "ruth", "sabre", "sack", "sad", "saddle", "safe", "safety", "sage", "sagittarius", "sail", "sailboat", "sailor", "salad", "salary", "sale", "salesman", "salmon", "salon", "saloon", "salt", "samovar", "sampan", "sample", "samurai", "sand", "sandals", "sandbar", "sandra", "sandwich", "santa", "sarah", "sardine", "sari", "sarong", "sash", "satellite", "satin", "satire", "satisfaction", "saturday", "sauce", "saudi arabia", "sausage", "save", "saving", "savior", "saviour", "saw", "saxophone", "scale", "scallion", "scanner", "scarecrow", "scarf", "scarification", "scene", "scent", "schedule", "scheme", "schizophrenic", "schnitzel", "school", "schoolhouse", "schooner", "science", "scimitar", "scissors", "scooter", "score", "scorn", "scorpio", "scorpion", "scow", "scraper", "screamer", "screen", "screenwriting", "screw", "screwdriver", "screw-up", "scrim", "scrip", "sculpting", "sculpture", "sea", "seagull", "seal", "seaplane", "search", "seashore", "season", "seat", "second", "secretariat", "secretary", "section", "sectional", "sector", "secure", "security", "seed", "seeder", "segment", "select", "selection", "self", "sell", "semicircle", "semicolon", "senator", "sense", "sentence", "sepal", "september", "septicaemia", "series", "servant", "server", "service", "session", "set", "setting", "settler", "sewer", "sex", "shack", "shade", "shadow", "shadowbox", "shake", "shakedown", "shaker", "shallot", "shame", "shampoo", "shanty", "shape", "share", "shark", "sharon", "shawl", "shearling", "shears", "sheath", "shed", "sheep", "sheet", "shelf", "shell", "sherry", "shield", "shift", "shin", "shine", "shingle", "ship", "shirt", "shirtdress", "shoat", "shock", "shoe", "shoehorn", "shoe-horn", "shoelace", "shoemaker", "shoes", "shoestring", "shofar", "shoot", "shootdown", "shop", "shopper", "shopping", "shore", "shortage", "shorts", "shortwave", "shot", "shoulder", "shovel", "show", "shower", "show-stopper", "shred", "shrimp", "shrine", "siamese", "sibling", "sick", "side", "sideboard", "sideburns", "sidecar", "sidestream", "sidewalk", "siding", "sign", "signature", "signet", "significance", "signup", "silica", "silk", "silkworm", "sill", "silo", "silver", "simple", "sing", "singer", "single", "sink", "sir", "sister", "sister-in-law", "sit", "sitar", "situation", "size", "skate", "skiing", "skill", "skin", "skirt", "skulduggery", "skull", "skullcap", "skullduggery", "skunk", "sky", "skylight", "skyscraper", "skywalk", "slapstick", "slash", "slave", "sled", "sledge", "sleep", "sleet", "sleuth", "slice", "slider", "slime", "slip", "slipper", "slippers", "slope", "sloth", "smash", "smell", "smelting", "smile", "smock", "smog", "smoke", "smuggling", "snail", "snake", "snakebite", "sneakers", "sneeze", "snob", "snorer", "snow", "snowboarding", "snowflake", "snowman", "snowmobiling", "snowplow", "snowstorm", "snowsuit", "snuggle", "soap", "soccer", "society", "sociology", "sock", "socks", "soda", "sofa", "softball", "softdrink", "softening", "software", "soil", "soldier", "solid", "solitaire", "solution", "sombrero", 
"somersault", "somewhere", "son", "song", "songbird", "sonnet", "soot", "soprano", "sorbet", "sort", "soulmate", "sound", "soup", "source", "sourwood", "sousaphone", "south", "south africa", "south america", "south korea", "sow", "soy", "soybean", "space", "spacing", "spade", "spaghetti", "spain", "spandex", "spank", "spark", "sparrow", "spasm", "speaker", "speakerphone", "spear", "special", "specialist", "specific", "spectacle", "spectacles", "spectrograph", "speech", "speedboat", "spend", "sphere", "sphynx", "spider", "spike", "spinach", "spine", "spiral", "spirit", "spiritual", "spite", "spleen", "split", "sponge", "spoon", "sport", "spot", "spotlight", "spray", "spread", "spring", "sprinter", "sprout", "spruce", "spume", "spur", "spy", "square", "squash", "squatter", "squeegee", "squid", "squirrel", "stable", "stack", "stacking", "stadium", "staff", "stag", "stage", "stain", "stair", "staircase", "stallion", "stamen", "stamina", "stamp", "stance", "standoff", "star", "start", "starter", "state", "statement", "station", "station-wagon", "statistic", "statistician", "steak", "steal", "steam", "steamroller", "steel", "steeple", "stem", "stencil", "step", "step-aunt", "step-brother", "stepdaughter", "step-daughter", "step-father", "step-grandfather", "step-grandmother", "stepmother", "step-mother", "stepping-stone", "steps", "step-sister", "stepson", "step-son", "step-uncle", "steven", "stew", "stick", "stiletto", "still", "stinger", "stitch", "stock", "stocking", "stockings", "stock-in-trade", "stole", "stomach", "stone", "stonework", "stool", "stop", "stopsign", "stopwatch", "storage", "store", "storey", "storm", "story", "storyboard", "story-telling", "stove", "strait", "stranger", "strap", "strategy", "straw", "strawberry", "stream", "street", "streetcar", "stress", "stretch", "strike", "string", "strip", "structure", "struggle", "stud", "student", "studio", "study", "stuff", "stumbling", "sturgeon", "style", "styling", "stylus", "subcomponent", "subconscious", "submarine", "subroutine", "subsidence", "substance", "suburb", "subway", "success", "suck", "sudan", "suede", "suffocation", "sugar", "suggestion", "suit", "suitcase", "sultan", "summer", "sun", "sunbeam", "sunbonnet", "sunday", "sundial", "sunflower", "sunglasses", "sunlamp", "sunroom", "sunshine", "supermarket", "supply", "support", "supporter", "suppression", "surface", "surfboard", "surgeon", "surgery", "surname", "surprise", "susan", "sushi", "suspect", "suspenders", "sustainment", "SUV", "swallow", "swamp", "swan", "swath", "sweat", "sweater", "sweats", "sweatshirt", "sweatshop", "sweatsuit", "swedish", "sweets", "swell", "swim", "swimming", "swimsuit", "swing", "swiss", "switch", "switchboard", "swivel", "sword", "swordfish", "sycamore", "sympathy", "syndicate", "synergy", "synod", "syria", "syrup", "system", "tabby", "tabernacle", "table", "tablecloth", "tabletop", "tachometer", "tackle", "tadpole", "tail", "tailor", "tailspin", "taiwan", "tale", "talk", "tam", "tambour", "tambourine", "tam-o'-shanter", "tandem", "tangerine", "tank", "tanker", "tankful", "tank-top", "tanzania", "tap", "target", "tassel", "taste", "tatami", "tattler", "tattoo", "taurus", "tavern", "tax", "taxi", "taxicab", "tea", "teacher", "teaching", "team", "tear", "technician", "technologist", "technology", "teen", "teeth", "telephone", "telescreen", "teletype", "television", "teller", "temp", "temper", "temperature", "temple", "tempo", "temporariness", "temptress", "tendency", "tenement", "tennis", "tenor", "tension", "tent", "tepee", "term", 
"terracotta", "terrapin", "territory", "test", "text", "textbook", "texture", "thailand", "thanks", "thaw", "theater", "theism", "theme", "theoretician", "theory", "therapist", "thermals", "thermometer", "thigh", "thing", "thinking", "thistle", "thomas", "thong", "thongs", "thorn", "thought", "thread", "thrill", "throat", "throne", "thrush", "thumb", "thunder", "thunderbolt", "thunderhead", "thunderstorm", "thursday", "tiara", "tic", "ticket", "tie", "tiger", "tight", "tights", "tile", "till", "timbale", "time", "timeline", "timeout", "timer", "timpani", "tin", "tinderbox", "tinkle", "tintype", "tip", "tire", "tissue", "titanium", "title", "toad", "toast", "toe", "toenail", "toga", "togs", "toilet", "tom", "tomato", "tomography", "tomorrow", "tom-tom", "ton", "tongue", "toot", "tooth", "toothbrush", "toothpaste", "toothpick", "top", "top-hat", "topic", "topsail", "toque", "torchiere", "toreador", "tornado", "torso", "tortellini", "tortoise", "tosser", "total", "tote", "touch", "tough", "tough-guy", "tour", "tourist", "towel", "tower", "town", "townhouse", "tow-truck", "toy", "trachoma", "track", "tracksuit", "tractor", "trade", "tradition", "traditionalism", "traffic", "trail", "trailer", "train", "trainer", "training", "tram", "tramp", "transaction", "translation", "transmission", "transom", "transport", "transportation", "trapdoor", "trapezium", "trapezoid", "trash", "travel", "tray", "treatment", "tree", "trellis", "tremor", "trench", "trial", "triangle", "tribe", "trick", "trigonometry", "trim", "trinket", "trip", "tripod", "trolley", "trombone", "trooper", "trouble", "trousers", "trout", "trove", "trowel", "truck", "truckit", "trumpet", "trunk", "trust", "truth", "try", "t-shirt", "tsunami", "tub", "tuba", "tube", "tuesday", "tugboat", "tulip", "tummy", "tuna", "tune", "tune-up", "tunic", "tunnel", "turban", "turkey", "turkish", "turn", "turnip", "turnover", "turnstile", "turret", "turtle", "tussle", "tutu", "tuxedo", "tv", "twig", "twilight", "twine", "twist", "twister", "two", "typewriter", "typhoon", "tyvek", "uganda", "ukraine", "ukulele", "umbrella", "unblinking", "uncle", "underclothes", "underground", "underneath", "underpants", "underpass", "undershirt", "understanding", "underwear", "underwire", "unibody", "uniform", "union", "unit", "united kingdom", "university", "urn", "use", "user", "usher", "utensil", "uzbekistan", "vacation", "vacuum", "vagrant", "valance", "valley", "valuable", "value", "van", "vane", "vanity", "variation", "variety", "vase", "vast", "vault", "vaulting", "veal", "vegetable", "vegetarian", "vehicle", "veil", "vein", "veldt", "vellum", "velodrome", "velvet", "venezuela", "venezuelan", "venom", "veranda", "verdict", "vermicelli", "verse", "version", "vertigo", "verve", "vessel", "vest", "vestment", "vibe", "vibraphone", "vibration", "video", "vietnam", "view", "villa", "village", "vineyard", "vinyl", "viola", "violet", "violin", "virginal", "virgo", "virtue", "virus", "viscose", "vise", "vision", "visit", "visitor", "visor", "vixen", "voice", "volcano", "volleyball", "volume", "voyage", "vulture", "wad", "wafer", "waffle", "waist", "waistband", "waiter", "waitress", "walk", "walker", "walkway", "wall", "wallaby", "wallet", "walnut", "walrus", "wampum", "wannabe", "war", "warden", "warlock", "warm-up", "warning", "wash", "washbasin", "washcloth", "washer", "washtub", "wasp", "waste", "wastebasket", "watch", "watchmaker", "water", "waterbed", "waterfall", "waterskiing", "waterspout", "wave", "wax", "way", "weakness", "wealth", "weapon", "weasel", "weather", 
"web", "wedding", "wedge", "wednesday", "weed", "weeder", "weedkiller", "week", "weekend", "weekender", "weight", "weird", "well", "west", "western", "wet-bar", "wetsuit", "whale", "wharf", "wheel", "whip", "whirlpool", "whirlwind", "whisker", "whiskey", "whistle", "white", "whole", "wholesale", "wholesaler", "whorl", "wife", "wilderness", "will", "william", "willow", "wind", "windage", "wind-chime", "window", "windscreen", "windshield", "wine", "wing", "wingman", "wingtip", "winner", "winter", "wire", "wiseguy", "wish", "wisteria", "witch", "witch-hunt", "withdrawal", "witness", "wolf", "woman", "wombat", "women", "wood", "woodland", "woodshed", "woodwind", "wool", "woolen", "word", "work", "workbench", "worker", "workhorse", "worklife", "workshop", "world", "worm", "worthy", "wound", "wrap", "wraparound", "wrecker", "wren", "wrench", "wrestler", "wrinkle", "wrist", "writer", "writing", "wrong", "xylophone", "yacht", "yak", "yam", "yard", "yarmulke", "yarn", "yawl", "year", "yellow", "yesterday", "yew", "yin", "yogurt", "yoke", "young", "youth", "yurt", "zampone", "zebra", "zebrafish", "zephyr", "ziggurat", "zinc", "zipper", "zither", "zone", "zoo", "zoologist", "zoology", "zoot-suit", "zucchinis"
]
| 15,801
| 47,391
| 0.625973
|
parts = ["aardvark", "abacus", "abbey", "abdomen", "ability", "abolishment", "abroad", "accelerant", "accelerator", "accident", "accompanist", "accordion", "account", "accountant", "achieve", "achiever", "acid", "acknowledgment", "acoustic", "acoustics", "acrylic", "act", "action", "active", "activity", "actor", "actress", "acupuncture", "ad", "adapter", "addiction", "addition", "address", "adjustment", "administration", "adrenalin", "adult", "advancement", "advantage", "advertisement", "advertising", "advice", "affair", "affect", "afghanistan", "africa", "aftermath", "afternoon", "aftershave", "aftershock", "afterthought", "age", "agency", "agenda", "agent", "aglet", "agreement", "air", "airbag", "airbus", "airfare", "airforce", "airline", "airmail", "airplane", "airport", "airship", "alarm", "alb", "albatross", "alcohol", "alcove", "alder", "algebra", "algeria", "alibi", "allergist", "alley", "alligator", "alloy", "almanac", "almond", "alpaca", "alpenglow", "alpenhorn", "alpha", "alphabet", "alternative", "altitude", "alto", "aluminium", "aluminum", "ambassador", "ambition", "ambulance", "amendment", "america", "amount", "amusement", "anagram", "analgesia", "analog", "analysis", "analyst", "anatomy", "anesthesiology", "anethesiologist", "anger", "angiosperm", "angle", "angora", "angstrom", "anguish", "animal", "anime", "ankle", "anklet", "annual", "anorak", "answer", "ant", "antarctica", "anteater", "antechamber", "antelope", "anthony", "anthropology", "antler", "anxiety", "anybody", "anything", "anywhere", "apartment", "ape", "aperitif", "apology", "apparatus", "apparel", "appeal", "appearance", "appendix", "apple", "applewood", "appliance", "application", "appointment", "approval", "april", "apron", "apse", "aquarius", "aquifer", "arch", "archaeology", "archeology", "archer", "architect", "architecture", "arch-rival", "area", "argentina", "argument", "aries", "arithmetic", "arm", "armadillo", "armament", "armchair", "armoire", "armor", "arm-rest", "army", "arrival", "arrow", "art", "artichoke", "article", "artificer", "ascot", "ash", "ashram", "ashtray", "asia", "asparagus", "aspect", "asphalt", "assignment", "assistance", "assistant", "associate", "association", "assumption", "asterisk", "astrakhan", "astrolabe", "astrologer", "astrology", "astronomy", "atelier", "athelete", "athlete", "atm", "atmosphere", "atom", "atrium", "attachment", "attack", "attempt", "attendant", "attention", "attenuation", "attic", "attitude", "attorney", "attraction", "audience", "auditorium", "august", "aunt", "australia", "author", "authorisation", "authority", "authorization", "automaton", "avalanche", "avenue", "average", "awareness", "azimuth", "babe", "babies", "baboon", "babushka", "baby", "back", "backbone", "backdrop", "backpack", "bacon", "bad", "badge", "badger", "bafflement", "bag", "bagel", "bagpipe", "bagpipes", "bail", "bait", "bake", "baker", "bakery", "bakeware", "balaclava", "balalaika", "balance", "balcony", "balinese", "ball", "balloon", "ballpark", "bamboo", "banana", "band", "bandana", "bandanna", "bandolier", "bangladesh", "bangle", "banjo", "bank", "bankbook", "banker", "banquette", "baobab", "bar", "barbara", "barbeque", "barber", "barbiturate", "barge", "baritone", "barium", "barn", "barometer", "barracks", "barstool", "base", "baseball", "basement", "basin", "basis", "basket", "basketball", "bass", "bassinet", "bassoon", "bat", "bath", "bather", "bathhouse", "bathrobe", "bathroom", "bathtub", "batter", "battery", "batting", "battle", "battleship", "bay", "bayou", "beach", "bead", 
"beak", "beam", "bean", "beanie", "beanstalk", "bear", "beard", "beast", "beat", "beautician", "beauty", "beaver", "bed", "bedroom", "bee", "beech", "beef", "beer", "beet", "beetle", "beggar", "beginner", "begonia", "behavior", "beheading", "behest", "belfry", "belief", "believe", "bell", "belligerency", "bellows", "belly", "belt", "bench", "bend", "beneficiary", "benefit", "bengal", "beret", "berry", "bestseller", "best-seller", "betty", "beverage", "beyond", "bibliography", "bicycle", "bid", "bidet", "bifocals", "big", "big-rig", "bijou", "bike", "bikini", "bill", "billboard", "bin", "biology", "biplane", "birch", "bird", "birdbath", "birdcage", "birdhouse", "bird-watcher", "birth", "birthday", "bit", "bite", "black", "blackberry", "blackboard", "blackfish", "bladder", "blade", "blame", "blank", "blanket", "blazer", "blight", "blinker", "blister", "blizzard", "block", "blocker", "blood", "bloodflow", "bloom", "bloomers", "blossom", "blouse", "blow", "blowgun", "blowhole", "blue", "blueberry", "boar", "board", "boat", "boat-building", "boatload", "boatyard", "bobcat", "body", "bog", "bolero", "bolt", "bomb", "bomber", "bondsman", "bone", "bongo", "bonnet", "bonsai", "bonus", "boogeyman", "book", "bookcase", "bookend", "booklet", "booster", "boot", "bootee", "bootie", "boots", "booty", "border", "bore", "bosom", "botany", "bottle", "bottling", "bottom", "bottom-line", "boudoir", "bough", "boundary", "bow", "bower", "bowl", "bowler", "bowling", "bowtie", "box", "boxer", "boxspring", "boy", "boyfriend", "bra", "brace", "bracelet", "bracket", "brain", "brake", "branch", "brand", "brandy", "brass", "brassiere", "bratwurst", "brazil", "bread", "breadcrumb", "break", "breakfast", "breakpoint", "breast", "breastplate", "breath", "breeze", "bribery", "brick", "bricklaying", "bridge", "brief", "briefs", "brilliant", "british", "broccoli", "brochure", "broiler", "broker", "brome", "bronchitis", "bronco", "bronze", "brooch", "brood", "brook", "broom", "brother", "brother-in-law", "brow", "brown", "brush", "brushfire", "brushing", "bubble", "bucket", "buckle", "bud", "budget", "buffer", "buffet", "bug", "buggy", "bugle", "building", "bulb", "bull", "bulldozer", "bullet", "bull-fighter", "bumper", "bun", "bunch", "bungalow", "bunghole", "bunkhouse", "burglar", "burlesque", "burma", "burn", "burn-out", "burst", "bus", "bush", "business", "bust", "bustle", "butane", "butcher", "butter", "button", "buy", "buyer", "buzzard", "cabana", "cabbage", "cabin", "cabinet", "cable", "caboose", "cacao", "cactus", "caddy", "cadet", "cafe", "caftan", "cake", "calcification", "calculation", "calculator", "calculus", "calendar", "calf", "calico", "call", "calm", "camel", "cameo", "camera", "camp", "campaign", "campanile", "can", "canada", "canal", "cancel", "cancer", "candelabra", "candidate", "candle", "candy", "cane", "cannon", "canoe", "canon", "canopy", "canteen", "canvas", "cap", "cape", "capital", "capitulation", "capon", "cappelletti", "cappuccino", "capricorn", "captain", "caption", "car", "caravan", "carbon", "card", "cardboard", "cardigan", "care", "cargo", "carload", "carnation", "carol", "carotene", "carp", "carpenter", "carpet", "carport", "carriage", "carrier", "carrot", "carry", "cart", "cartilage", "cartload", "cartoon", "cartridge", "cascade", "case", "casement", "cash", "cashier", "casino", "casserole", "cassock", "cast", "castanet", "castanets", "castle", "cat", "catacomb", "catamaran", "category", "caterpillar", "cathedral", "catsup", "cattle", "cauliflower", "cause", "caution", "cave", "c-clamp", 
"cd", "ceiling", "celebration", "celeriac", "celery", "celeste", "cell", "cellar", "cello", "celsius", "cement", "cemetery", "cenotaph", "census", "cent", "centenarian", "center", "centimeter", "centurion", "century", "cephalopod", "ceramic", "cereal", "certification", "cesspool", "chador", "chafe", "chain", "chainstay", "chair", "chairlift", "chairman", "chairperson", "chairwoman", "chaise", "chalet", "chalice", "chalk", "champion", "championship", "chance", "chandelier", "change", "channel", "chap", "chapel", "chapter", "character", "chard", "charge", "charity", "charlatan", "charles", "charm", "chart", "chastity", "chasuble", "chateau", "chauffeur", "chauvinist", "check", "checkroom", "cheek", "cheese", "cheetah", "chef", "chemistry", "cheque", "cherries", "cherry", "chess", "chest", "chick", "chicken", "chicory", "chief", "chiffonier", "child", "childhood", "children", "chill", "chime", "chimpanzee", "chin", "china", "chinese", "chino", "chipmunk", "chit-chat", "chivalry", "chive", "chocolate", "choice", "choker", "chop", "chopstick", "chord", "chowder", "christmas", "christopher", "chrome", "chromolithograph", "chronograph", "chronometer", "chub", "chug", "church", "churn", "cicada", "cigarette", "cinema", "circle", "circulation", "circumference", "cirrus", "citizenship", "city", "civilisation", "clam", "clank", "clapboard", "clarinet", "clasp", "class", "classroom", "claus", "clave", "clavicle", "clavier", "cleaner", "cleat", "cleavage", "clef", "cleric", "clerk", "click", "client", "cliff", "climate", "climb", "clip", "clipper", "cloak", "cloakroom", "clock", "clockwork", "clogs", "cloister", "close", "closet", "cloth", "clothes", "clothing", "cloud", "cloudburst", "cloudy", "clove", "clover", "club", "clutch", "coach", "coal", "coast", "coat", "cob", "cobweb", "cockpit", "cockroach", "cocktail", "cocoa", "cod", "codon", "codpiece", "coevolution", "coffee", "coffin", "coil", "coin", "coinsurance", "coke", "cold", "coliseum", "collar", "collection", "college", "collision", "colloquia", "colombia", "colon", "colonisation", "colony", "color", "colt", "column", "columnist", "comb", "combat", "combination", "comfort", "comfortable", "comic", "comma", "command", "commercial", "commission", "committee", "communicant", "communication", "community", "company", "comparison", "competition", "competitor", "complaint", "complement", "complex", "component", "comportment", "composer", "composition", "compost", "compulsion", "computer", "comradeship", "concept", "concert", "conclusion", "concrete", "condition", "condominium", "condor", "conductor", "cone", "confectionery", "conference", "confidence", "confirmation", "conflict", "confusion", "conga", "congo", "congressman", "congressperson", "congresswoman", "conifer", "connection", "consent", "consequence", "console", "consonant", "conspirator", "constant", "constellation", "construction", "consul", "consulate", "contact lens", "contagion", "contest", "context", "continent", "contract", "contrail", "contrary", "contribution", "control", "convection", "conversation", "convert", "convertible", "cook", "cookie", "cooking", "coonskin", "cope", "cop-out", "copper", "co-producer", "copy", "copyright", "copywriter", "cord", "corduroy", "cork", "cormorant", "corn", "cornerstone", "cornet", "corral", "correspondent", "corridor", "corsage", "cost", "costume", "cot", "cottage", "cotton", "couch", "cougar", "cough", "council", "councilman", "councilor", "councilperson", "councilwoman", "counter", "counter-force", "countess", "country", "county", "couple", 
"courage", "course", "court", "cousin", "covariate", "cover", "coverall", "cow", "cowbell", "cowboy", "crab", "crack", "cracker", "crackers", "cradle", "craftsman", "crash", "crate", "cravat", "craw", "crawdad", "crayfish", "crayon", "cream", "creative", "creator", "creature", "creche", "credenza", "credit", "creditor", "creek", "creme brulee", "crest", "crew", "crib", "cribbage", "cricket", "cricketer", "crime", "criminal", "crinoline", "criteria", "criterion", "criticism", "crocodile", "crocus", "croissant", "crook", "crop", "cross", "cross-contamination", "cross-stitch", "crotch", "croup", "crow", "crowd", "crown", "crude", "crush", "cry", "crystallography", "cub", "cuban", "cuckoo", "cucumber", "cuff-links", "cultivar", "cultivator", "culture", "culvert", "cummerbund", "cup", "cupboard", "cupcake", "cupola", "curio", "curl", "curler", "currency", "current", "cursor", "curtain", "curve", "cushion", "custard", "custodian", "customer", "cut", "cuticle", "cutlet", "cutover", "cutting", "cyclamen", "cycle", "cyclone", "cylinder", "cymbal", "cymbals", "cynic", "cyst", "cytoplasm", "dad", "daffodil", "dagger", "dahlia", "daisy", "damage", "dame", "dance", "dancer", "danger", "daniel", "dark", "dart", "dash", "dashboard", "data", "database", "date", "daughter", "david", "day", "daybed", "dead", "deadline", "deal", "dealer", "dear", "death", "deathwatch", "deborah", "debt", "debtor", "decade", "december", "decimal", "decision", "deck", "declination", "decongestant", "decrease", "decryption", "dedication", "deer", "defense", "deficit", "definition", "deformation", "degree", "delete", "delivery", "demand", "demur", "den", "denim", "dentist", "deodorant", "department", "departure", "dependent", "deployment", "deposit", "depression", "depressive", "depth", "deputy", "derby", "derrick", "description", "desert", "design", "designer", "desire", "desk", "dessert", "destiny", "destroyer", "destruction", "detail", "detainment", "detective", "detention", "determination", "development", "deviance", "device", "dew", "dhow", "diadem", "diamond", "diaphragm", "diarist", "dibble", "dickey", "dictaphone", "diction", "dictionary", "diet", "dietician", "difference", "differential", "difficulty", "digestion", "digger", "digital", "dilapidation", "dill", "dime", "dimension", "dimple", "diner", "dinghy", "dinner", "dinosaur", "diploma", "dipstick", "direction", "director", "dirndl", "dirt", "disadvantage", "disarmament", "disaster", "disco", "disconnection", "discount", "discovery", "discrepancy", "discussion", "disease", "disembodiment", "disengagement", "disguise", "disgust", "dish", "dishes", "dishwasher", "disk", "display", "disposer", "distance", "distribution", "distributor", "district", "divan", "diver", "divide", "divider", "diving", "division", "dock", "doctor", "document", "doe", "dog", "dogsled", "dogwood", "doll", "dollar", "dolman", "dolphin", "domain", "donald", "donkey", "donna", "door", "doorknob", "doorpost", "dorothy", "dory", "dot", "double", "doubling", "doubt", "doubter", "downforce", "downgrade", "downtown", "draft", "dragon", "dragonfly", "dragster", "drain", "drake", "drama", "dramaturge", "draw", "drawbridge", "drawer", "drawing", "dream", "dredger", "dress", "dresser", "dressing", "drill", "drink", "drive", "driver", "driveway", "driving", "drizzle", "dromedary", "drop", "drug", "drum", "drummer", "drunk", "dry", "dryer", "duck", "duckling", "dud", "duffel", "dugout", "dulcimer", "dumbwaiter", "dump truck", "dune buggy", "dungarees", "dungeon", "duplexer", "dust", "dust storm", "duster", 
"duty", "dwarf", "dwelling", "dynamo", "eagle", "ear", "eardrum", "earmuffs", "earplug", "earrings", "earth", "earthquake", "earthworm", "ease", "easel", "east", "eave", "eavesdropper", "e-book", "ecclesia", "eclipse", "ecliptic", "economics", "ecumenist", "eddy", "edge", "edger", "editor", "editorial", "education", "edward", "eel", "effacement", "effect", "effective", "efficacy", "efficiency", "effort", "egg", "egghead", "eggnog", "eggplant", "egypt", "eight", "ejector", "elbow", "election", "electrocardiogram", "element", "elephant", "elevator", "elixir", "elizabeth", "elk", "ellipse", "elm", "elongation", "embossing", "emergence", "emergent", "emery", "emotion", "emphasis", "employ", "employee", "employer", "employment", "empowerment", "emu", "encirclement", "encyclopedia", "end", "endothelium", "enemy", "energy", "engine", "engineer", "engineering", "english", "enigma", "enquiry", "entertainment", "enthusiasm", "entrance", "entry", "environment", "epauliere", "epee", "ephemera", "ephemeris", "epoch", "eponym", "epoxy", "equinox", "equipment", "era", "e-reader", "error", "escape", "espadrille", "espalier", "establishment", "estate", "estimate", "estrogen", "estuary", "ethernet", "ethiopia", "euphonium", "eurocentrism", "europe", "evaluator", "evening", "evening-wear", "event", "eviction", "evidence", "evocation", "exam", "examination", "examiner", "example", "exchange", "excitement", "exclamation", "excuse", "executor", "exhaust", "ex-husband", "exile", "existence", "exit", "expansion", "expansionism", "experience", "expert", "explanation", "exposition", "expression", "extension", "extent", "extreme", "ex-wife", "eye", "eyeball", "eyebrow", "eyebrows", "eyeglasses", "eyelash", "eyelashes", "eyelid", "eyelids", "eyeliner", "eyestrain", "face", "facelift", "facet", "facilities", "facsimile", "fact", "factor", "factory", "faculty", "fahrenheit", "failure", "fairies", "fairy", "fall", "falling-out", "familiar", "family", "fan", "fang", "fanlight", "fanny", "fanny-pack", "farm", "farmer", "fascia", "fat", "father", "father-in-law", "fatigues", "faucet", "fault", "fawn", "fax", "fear", "feast", "feather", "feature", "february", "fedelini", "fedora", "feed", "feedback", "feeling", "feet", "felony", "female", "fen", "fence", "fencing", "fender", "ferry", "ferryboat", "fertilizer", "few", "fiber", "fiberglass", "fibre", "fiction", "fiddle", "field", "fifth", "fight", "fighter", "figurine", "file", "fill", "filly", "filth", "final", "finance", "find", "finding", "fine", "finger", "fingernail", "finisher", "fir", "fire", "fireman", "fireplace", "firewall", "fish", "fishbone", "fisherman", "fishery", "fishing", "fishmonger", "fishnet", "fisting", "fix", "fixture", "flag", "flame", "flanker", "flare", "flash", "flat", "flatboat", "flavor", "flax", "fleck", "fleece", "flesh", "flight", "flintlock", "flip-flops", "flock", "flood", "floor", "floozie", "flower", "flu", "flugelhorn", "fluke", "flute", "fly", "flytrap", "foam", "fob", "focus", "fog", "fold", "folder", "fondue", "font", "food", "foot", "football", "footnote", "footrest", "foot-rest", "footstool", "foray", "force", "forearm", "forebear", "forecast", "forehead", "forest", "forestry", "forgery", "fork", "form", "formal", "format", "former", "fort", "fortnight", "fortress", "fortune", "forum", "foundation", "fountain", "fowl", "fox", "foxglove", "fragrance", "frame", "france", "fratricide", "fraudster", "frazzle", "freckle", "freedom", "freeplay", "freeze", "freezer", "freight", "freighter", "french", "freon", "fresco", "friction", "friday", 
"fridge", "friend", "friendship", "frigate", "fringe", "frock", "frog", "front", "frost", "frown", "fruit", "frustration", "fuel", "fulfillment", "full", "function", "fundraising", "funeral", "funny", "fur", "furnace", "furniture", "fusarium", "futon", "future", "gaffer", "gaiters", "gale", "gall-bladder", "galleon", "gallery", "galley", "gallon", "galoshes", "game", "gamebird", "gamma-ray", "gander", "gap", "garage", "garb", "garbage", "garden", "garlic", "garment", "garter", "gas", "gasoline", "gastropod", "gate", "gateway", "gather", "gauge", "gauntlet", "gazebo", "gazelle", "gear", "gearshift", "geese", "gelding", "gem", "gemini", "gemsbok", "gender", "gene", "general", "genetics", "geography", "geology", "geometry", "george", "geranium", "gerbil", "geriatrician", "german", "germany", "geyser", "ghana", "gherkin", "ghost", "giant", "gigantism", "ginseng", "giraffe", "girdle", "girl", "girlfriend", "git", "glad", "gladiolus", "gland", "glass", "glasses", "glen", "glider", "gliding", "glockenspiel", "glove", "gloves", "glue", "glut", "goal", "goat", "gobbler", "godmother", "goggles", "go-kart", "gold", "goldfish", "golf", "gondola", "gong", "good", "goodbye", "good-bye", "goodie", "goose", "gopher", "gore-tex", "gorilla", "gosling", "governance", "government", "governor", "gown", "grab-bag", "grade", "grain", "gram", "granddaughter", "grandfather", "grandmom", "grandmother", "grandson", "granny", "grape", "grapefruit", "graph", "graphic", "grass", "grasshopper", "grassland", "gray", "grease", "great", "great-grandfather", "great-grandmother", "greece", "greek", "green", "greenhouse", "grenade", "grey", "grief", "grill", "grip", "grit", "grocery", "ground", "group", "grouper", "grouse", "growth", "guarantee", "guatemalan", "guest", "guestbook", "guidance", "guide", "guilty", "guitar", "guitarist", "gum", "gumshoes", "gun", "gutter", "guy", "gym", "gymnast", "gynaecology", "gyro", "hacienda", "hacksaw", "hackwork", "hail", "hair", "haircut", "half", "half-brother", "half-sister", "halibut", "hall", "hallway", "hamaki", "hamburger", "hammer", "hammock", "hamster", "hand", "handball", "hand-holding", "handicap", "handle", "handlebar", "handmaiden", "handsaw", "hang", "harbor", "harbour", "hardboard", "hardcover", "hardening", "hardhat", "hard-hat", "hardware", "harm", "harmonica", "harmony", "harp", "harpooner", "harpsichord", "hassock", "hat", "hatbox", "hatchet", "hate", "haunt", "haversack", "hawk", "hay", "head", "headlight", "headline", "headrest", "health", "hearing", "heart", "heartache", "hearth", "hearthside", "heart-throb", "heartwood", "heat", "heater", "heaven", "heavy", "hedge", "hedgehog", "heel", "height", "heirloom", "helen", "helicopter", "helium", "hell", "hellcat", "helmet", "helo", "help", "hemp", "hen", "herb", "heron", "herring", "hexagon", "heyday", "hide", "high", "highlight", "high-rise", "highway", "hill", "himalayan", "hip", "hippodrome", "hippopotamus", "historian", "history", "hit", "hive", "hobbies", "hobbit", "hobby", "hockey", "hoe", "hog", "hold", "hole", "holiday", "home", "homework", "homogenate", "homonym", "honey", "honeybee", "honoree", "hood", "hoof", "hook", "hope", "hops", "horn", "hornet", "horse", "hose", "hosiery", "hospice", "hospital", "host", "hostel", "hostess", "hot", "hot-dog", "hotel", "hour", "hourglass", "house", "houseboat", "housing", "hovel", "hovercraft", "howitzer", "hub", "hubcap", "hugger", "human", "humidity", "humor", "hunger", "hurdler", "hurricane", "hurry", "hurt", "husband", "hut", "hutch", "hyacinth", "hybridisation", 
"hydrant", "hydraulics", "hydrofoil", "hydrogen", "hyena", "hygienic", "hyphenation", "hypochondria", "hypothermia", "ice", "icebreaker", "icecream", "ice-cream", "icicle", "icon", "idea", "ideal", "igloo", "ikebana", "illegal", "image", "imagination", "impact", "implement", "importance", "impress", "impression", "imprisonment", "improvement", "impudence", "impulse", "inbox", "incandescence", "inch", "income", "increase", "independence", "independent", "index", "india", "indication", "indigence", "indonesia", "industry", "infancy", "inflammation", "inflation", "information", "infusion", "inglenook", "ingrate", "initial", "initiative", "in-joke", "injury", "ink", "in-laws", "inlay", "inn", "innervation", "innocent", "input", "inquiry", "inscription", "insect", "inside", "insolence", "inspection", "inspector", "instance", "instruction", "instrument", "instrumentalist", "instrumentation", "insulation", "insurance", "insurgence", "intelligence", "intention", "interaction", "interactive", "interest", "interferometer", "interior", "interloper", "internal", "internet", "interpreter", "intervenor", "interview", "interviewer", "intestine", "intestines", "introduction", "invention", "inventor", "inventory", "investment", "invite", "invoice", "iPad", "iran", "iraq", "iridescence", "iris", "iron", "ironclad", "island", "israel", "issue", "italy", "jackal", "jacket", "jaguar", "jail", "jailhouse", "jam", "james", "january", "japan", "japanese", "jar", "jasmine", "jason", "jaw", "jeans", "jeep", "jeff", "jelly", "jellyfish", "jennifer", "jet", "jewel", "jewelry", "jiffy", "job", "jockey", "jodhpurs", "joey", "jogging", "john", "join", "joke", "joseph", "jot", "journey", "judge", "judgment", "judo", "juggernaut", "juice", "july", "jumbo", "jump", "jumper", "jumpsuit", "june", "junior", "junk", "junker", "junket", "jury", "justice", "jute", "kale", "kamikaze", "kangaroo", "karate", "karen", "kayak", "kazoo", "kendo", "kenneth", "kenya", "ketch", "ketchup", "kettle", "kettledrum", "kevin", "key", "keyboard", "keyboarding", "keystone", "kick", "kick-off", "kid", "kidney", "kidneys", "kielbasa", "kill", "kilogram", "kilometer", "kilt", "kimberly", "kimono", "kind", "king", "kingfish", "kiosk", "kiss", "kitchen", "kite", "kitten", "kitty", "kleenex", "klomps", "knee", "kneejerk", "knickers", "knife", "knife-edge", "knight", "knitting", "knot", "knowledge", "knuckle", "koala", "kohlrabi", "korean", "lab", "laborer", "lace", "lacquerware", "ladder", "lady", "ladybug", "lake", "lamb", "lamp", "lan", "lanai", "land", "landform", "landmine", "language", "lantern", "lap", "laparoscope", "lapdog", "laptop", "larch", "larder", "lark", "laryngitis", "lasagna", "latency", "latex", "lathe", "latte", "laugh", "laundry", "laura", "law", "lawn", "lawsuit", "lawyer", "layer", "lead", "leader", "leadership", "leaf", "league", "leaker", "learning", "leash", "leather", "leaver", "lecture", "leek", "leg", "legal", "legging", "legume", "lei", "lemon", "lemonade", "lemur", "length", "lentil", "leo", "leopard", "leotard", "leprosy", "let", "letter", "lettuce", "level", "lever", "leverage", "libra", "librarian", "library", "license", "lier", "life", "lift", "light", "lighting", "lightning", "lilac", "lily", "limit", "limo", "line", "linen", "liner", "link", "linseed", "lion", "lip", "lipstick", "liquid", "liquor", "lisa", "list", "literature", "litigation", "litter", "liver", "living", "lizard", "llama", "loaf", "loafer", "loan", "lobotomy", "lobster", "location", "lock", "locker", "locket", "locomotive", "locust", "loft", "log", 
"loggia", "loincloth", "look", "loss", "lot", "lotion", "lounge", "lout", "love", "low", "loyalty", "luck", "luggage", "lumber", "lumberman", "lunch", "luncheonette", "lunchroom", "lung", "lunge", "lute", "luttuce", "lycra", "lye", "lymphocyte", "lynx", "lyocell", "lyre", "lyric", "macadamia", "macaroni", "machine", "macrame", "macrofauna", "maelstrom", "maestro", "magazine", "magic", "magician", "maid", "maiden", "mail", "mailbox", "mailman", "maintenance", "major", "major-league", "makeup", "malaysia", "male", "mall", "mallet", "mambo", "mammoth", "man", "management", "manager", "mandarin", "mandolin", "mangrove", "manhunt", "maniac", "manicure", "manner", "manor", "mansard", "manservant", "mansion", "mantel", "mantle", "mantua", "manufacturer", "manx", "map", "maple", "maraca", "maracas", "marble", "march", "mare", "margaret", "margin", "maria", "mariachi", "marimba", "mark", "market", "marketing", "marksman", "marriage", "marsh", "marshland", "marxism", "mary", "mascara", "mask", "mass", "massage", "master", "mastication", "mastoid", "mat", "match", "material", "math", "mattock", "mattress", "maximum", "may", "maybe", "mayonnaise", "mayor", "meal", "meaning", "measure", "measurement", "meat", "mechanic", "media", "medicine", "medium", "meet", "meeting", "megalomaniac", "melody", "member", "membership", "memory", "men", "menorah", "mention", "menu", "mercury", "mess", "message", "metal", "metallurgist", "meteor", "meteorology", "meter", "methane", "method", "methodology", "metro", "metronome", "mexican", "mexico", "mezzanine", "mice", "michael", "michelle", "microlending", "microwave", "mid-course", "middle", "middleman", "midi", "midline", "midnight", "midwife", "might", "migrant", "mile", "milk", "milkshake", "millennium", "millimeter", "millisecond", "mime", "mimosa", "mind", "mine", "mini", "minibus", "minion", "mini-skirt", "minister", "minor", "minor-league", "mint", "minute", "mirror", "miscarriage", "miscommunication", "misfit", "misogyny", "misplacement", "misreading", "missile", "mission", "mist", "mistake", "mister", "miter", "mitten", "mix", "mixer", "mixture", "moat", "mobile", "moccasins", "mocha", "mode", "model", "modem", "mole", "mom", "moment", "monastery", "monasticism", "monday", "money", "monger", "monitor", "monkey", "monocle", "monotheism", "monsoon", "monster", "month", "mood", "moon", "moonscape", "moonshine", "mop", "Mormon", "morning", "morocco", "morsel", "mortise", "mosque", "mosquito", "most", "motel", "moth", "mother", "mother-in-law", "motion", "motor", "motorboat", "motorcar", "motorcycle", "mound", "mountain", "mouse", "mouser", "mousse", "moustache", "mouth", "mouton", "move", "mover", "movie", "mower", "mud", "mug", "mukluk", "mule", "multimedia", "muscle", "musculature", "museum", "music", "music-box", "musician", "music-making", "mustache", "mustard", "mutt", "myanmar", "mycoplasma", "nail", "name", "naming", "nancy", "nanoparticle", "napkin", "narcissus", "nation", "naturalisation", "nature", "neat", "neck", "necklace", "necktie", "necromancer", "need", "needle", "negligee", "negotiation", "neologism", "neon", "nepal", "nephew", "nerve", "nest", "net", "netball", "netbook", "netsuke", "network", "neurobiologist", "neuropathologist", "neuropsychiatry", "news", "newspaper", "newsprint", "newsstand", "nexus", "nic", "nicety", "niche", "nickel", "niece", "nigeria", "night", "nightclub", "nightgown", "nightingale", "nightlight", "nitrogen", "node", "noise", "nonbeliever", "nonconformist", "nondisclosure", "noodle", "normal", "norse", "north", "north 
america", "north korea", "nose", "note", "notebook", "notice", "notify", "notoriety", "nougat", "novel", "november", "nudge", "number", "numeracy", "numeric", "numismatist", "nurse", "nursery", "nurture", "nut", "nylon", "oak", "oar", "oasis", "oatmeal", "obi", "objective", "obligation", "oboe", "observation", "observatory", "occasion", "occupation", "ocean", "ocelot", "octagon", "octave", "octavo", "octet", "october", "octopus", "odometer", "oeuvre", "offence", "offer", "office", "official", "off-ramp", "oil", "okra", "oldie", "olive", "omega", "omelet", "oncology", "one", "onion", "open", "opening", "opera", "operation", "ophthalmologist", "opinion", "opium", "opossum", "opportunist", "opportunity", "opposite", "option", "orange", "orangutan", "orator", "orchard", "orchestra", "orchid", "order", "ordinary", "ordination", "organ", "organisation", "organization", "original", "ornament", "osmosis", "osprey", "ostrich", "others", "otter", "ottoman", "ounce", "outback", "outcome", "outfit", "outhouse", "outlay", "output", "outrigger", "outset", "outside", "oval", "ovary", "oven", "overcharge", "overclocking", "overcoat", "overexertion", "overflight", "overnighter", "overshoot", "owl", "owner", "ox", "oxen", "oxford", "oxygen", "oyster", "pacemaker", "pack", "package", "packet", "pad", "paddle", "paddock", "page", "pagoda", "pail", "pain", "paint", "painter", "painting", "paintwork", "pair", "pajama", "pajamas", "pakistan", "paleontologist", "paleontology", "palm", "pamphlet", "pan", "pancake", "pancreas", "panda", "panic", "pannier", "panpipe", "pansy", "panther", "panties", "pantry", "pants", "pantsuit", "panty", "pantyhose", "paper", "paperback", "parable", "parachute", "parade", "parallelogram", "paramedic", "parcel", "parchment", "parent", "parentheses", "park", "parka", "parrot", "parsnip", "part", "participant", "particle", "particular", "partner", "partridge", "party", "passage", "passbook", "passenger", "passion", "passive", "pasta", "paste", "pastor", "pastoralist", "pastry", "patch", "path", "patience", "patient", "patina", "patio", "patriarch", "patricia", "patrimony", "patriot", "patrol", "pattern", "paul", "pavement", "pavilion", "paw", "pawnshop", "payee", "payment", "pea", "peace", "peach", "peacoat", "peacock", "peak", "peanut", "pear", "pearl", "pedal", "pedestrian", "pediatrician", "peen", "peer", "peer-to-peer", "pegboard", "pelican", "pelt", "pen", "penalty", "pencil", "pendant", "pendulum", "penicillin", "pension", "pentagon", "peony", "people", "pepper", "percentage", "perception", "perch", "performance", "perfume", "period", "periodical", "peripheral", "permafrost", "permission", "permit", "perp", "person", "personality", "perspective", "peru", "pest", "pet", "petal", "petticoat", "pew", "pharmacist", "pharmacopoeia", "phase", "pheasant", "philippines", "philosopher", "philosophy", "phone", "photo", "photographer", "phrase", "physical", "physician", "physics", "pianist", "piano", "piccolo", "pick", "pickax", "picket", "pickle", "picture", "pie", "piece", "pier", "piety", "pig", "pigeon", "pike", "pile", "pilgrimage", "pillbox", "pillow", "pilot", "pimp", "pimple", "pin", "pinafore", "pince-nez", "pine", "pineapple", "pinecone", "ping", "pink", "pinkie", "pinstripe", "pint", "pinto", "pinworm", "pioneer", "pipe", "piracy", "piranha", "pisces", "piss", "pitch", "pitching", "pith", "pizza", "place", "plain", "plane", "planet", "plant", "plantation", "planter", "plaster", "plasterboard", "plastic", "plate", "platform", "platinum", "platypus", "play", "player", "playground", 
"playroom", "pleasure", "pleated", "plier", "plot", "plough", "plover", "plow", "plowman", "plume", "plunger", "plywood", "pneumonia", "pocket", "pocketbook", "pocket-watch", "poem", "poet", "poetry", "poignance", "point", "poison", "poisoning", "poland", "pole", "polenta", "police", "policeman", "policy", "polish", "politician", "politics", "pollution", "polo", "polyester", "pompom", "poncho", "pond", "pony", "poof", "pool", "popcorn", "poppy", "popsicle", "population", "populist", "porch", "porcupine", "port", "porter", "portfolio", "porthole", "position", "positive", "possession", "possibility", "postage", "postbox", "poster", "pot", "potato", "potential", "potty", "pouch", "poultry", "pound", "pounding", "powder", "power", "precedent", "precipitation", "preface", "preference", "prelude", "premeditation", "premier", "preoccupation", "preparation", "presence", "presentation", "president", "pressroom", "pressure", "pressurisation", "price", "pride", "priest", "priesthood", "primary", "primate", "prince", "princess", "principal", "print", "printer", "priority", "prison", "prize", "prizefight", "probation", "problem", "procedure", "process", "processing", "produce", "producer", "product", "production", "profession", "professional", "professor", "profit", "program", "project", "promotion", "prompt", "proof-reader", "propane", "property", "proposal", "prose", "prosecution", "protection", "protest", "protocol", "prow", "pruner", "pseudoscience", "psychiatrist", "psychoanalyst", "psychologist", "psychology", "ptarmigan", "publisher", "pudding", "puddle", "puffin", "pull", "pulley", "puma", "pump", "pumpkin", "pumpkinseed", "punch", "punishment", "pupa", "pupil", "puppy", "purchase", "puritan", "purple", "purpose", "purse", "push", "pusher", "put", "pvc", "pyjama", "pyramid", "quadrant", "quail", "quality", "quantity", "quart", "quarter", "quartz", "queen", "question", "quicksand", "quiet", "quill", "quilt", "quince", "quit", "quiver", "quotation", "rabbi", "rabbit", "raccoon", "race", "racer", "racing", "racist", "rack", "radar", "radiator", "radio", "radiosonde", "radish", "raffle", "raft", "rag", "rage", "rail", "railway", "raiment", "rain", "rainbow", "raincoat", "rainmaker", "rainstorm", "raise", "rake", "ram", "rambler", "ramie", "ranch", "random", "randomisation", "range", "rank", "raspberry", "rat", "rate", "ratio", "raven", "ravioli", "raw", "rawhide", "ray", "rayon", "reactant", "reaction", "read", "reading", "reality", "reamer", "rear", "reason", "receipt", "reception", "recess", "recipe", "recliner", "recognition", "recommendation", "record", "recorder", "recording", "recover", "recruit", "rectangle", "red", "redesign", "rediscovery", "reduction", "reef", "refectory", "reflection", "refrigerator", "refund", "refuse", "region", "register", "regret", "regular", "regulation", "reindeer", "reinscription", "reject", "relation", "relationship", "relative", "religion", "relish", "reminder", "rent", "repair", "reparation", "repeat", "replace", "replacement", "replication", "reply", "report", "representative", "reprocessing", "republic", "reputation", "request", "requirement", "resale", "research", "resident", "resist", "resolution", "resource", "respect", "respite", "response", "responsibility", "rest", "restaurant", "result", "retailer", "rethinking", "retina", "retouch", "return", "reveal", "revenant", "revenue", "review", "revolution", "revolve", "revolver", "reward", "rheumatism", "rhinoceros", "rhyme", "rhythm", "rice", "richard", "riddle", "ride", "rider", "ridge", "rifle", "right", 
"rim", "ring", "ringworm", "ripple", "rise", "riser", "risk", "river", "riverbed", "rivulet", "road", "roadway", "roast", "robe", "robert", "robin", "rock", "rocker", "rocket", "rocket-ship", "rod", "role", "roll", "roller", "romania", "ronald", "roof", "room", "rooster", "root", "rope", "rose", "rostrum", "rotate", "roundabout", "route", "router", "routine", "row", "rowboat", "royal", "rub", "rubber", "rubric", "ruckus", "ruffle", "rugby", "rule", "run", "runaway", "runner", "russia", "rutabaga", "ruth", "sabre", "sack", "sad", "saddle", "safe", "safety", "sage", "sagittarius", "sail", "sailboat", "sailor", "salad", "salary", "sale", "salesman", "salmon", "salon", "saloon", "salt", "samovar", "sampan", "sample", "samurai", "sand", "sandals", "sandbar", "sandra", "sandwich", "santa", "sarah", "sardine", "sari", "sarong", "sash", "satellite", "satin", "satire", "satisfaction", "saturday", "sauce", "saudi arabia", "sausage", "save", "saving", "savior", "saviour", "saw", "saxophone", "scale", "scallion", "scanner", "scarecrow", "scarf", "scarification", "scene", "scent", "schedule", "scheme", "schizophrenic", "schnitzel", "school", "schoolhouse", "schooner", "science", "scimitar", "scissors", "scooter", "score", "scorn", "scorpio", "scorpion", "scow", "scraper", "screamer", "screen", "screenwriting", "screw", "screwdriver", "screw-up", "scrim", "scrip", "sculpting", "sculpture", "sea", "seagull", "seal", "seaplane", "search", "seashore", "season", "seat", "second", "secretariat", "secretary", "section", "sectional", "sector", "secure", "security", "seed", "seeder", "segment", "select", "selection", "self", "sell", "semicircle", "semicolon", "senator", "sense", "sentence", "sepal", "september", "septicaemia", "series", "servant", "server", "service", "session", "set", "setting", "settler", "sewer", "sex", "shack", "shade", "shadow", "shadowbox", "shake", "shakedown", "shaker", "shallot", "shame", "shampoo", "shanty", "shape", "share", "shark", "sharon", "shawl", "shearling", "shears", "sheath", "shed", "sheep", "sheet", "shelf", "shell", "sherry", "shield", "shift", "shin", "shine", "shingle", "ship", "shirt", "shirtdress", "shoat", "shock", "shoe", "shoehorn", "shoe-horn", "shoelace", "shoemaker", "shoes", "shoestring", "shofar", "shoot", "shootdown", "shop", "shopper", "shopping", "shore", "shortage", "shorts", "shortwave", "shot", "shoulder", "shovel", "show", "shower", "show-stopper", "shred", "shrimp", "shrine", "siamese", "sibling", "sick", "side", "sideboard", "sideburns", "sidecar", "sidestream", "sidewalk", "siding", "sign", "signature", "signet", "significance", "signup", "silica", "silk", "silkworm", "sill", "silo", "silver", "simple", "sing", "singer", "single", "sink", "sir", "sister", "sister-in-law", "sit", "sitar", "situation", "size", "skate", "skiing", "skill", "skin", "skirt", "skulduggery", "skull", "skullcap", "skullduggery", "skunk", "sky", "skylight", "skyscraper", "skywalk", "slapstick", "slash", "slave", "sled", "sledge", "sleep", "sleet", "sleuth", "slice", "slider", "slime", "slip", "slipper", "slippers", "slope", "sloth", "smash", "smell", "smelting", "smile", "smock", "smog", "smoke", "smuggling", "snail", "snake", "snakebite", "sneakers", "sneeze", "snob", "snorer", "snow", "snowboarding", "snowflake", "snowman", "snowmobiling", "snowplow", "snowstorm", "snowsuit", "snuggle", "soap", "soccer", "society", "sociology", "sock", "socks", "soda", "sofa", "softball", "softdrink", "softening", "software", "soil", "soldier", "solid", "solitaire", "solution", "sombrero", 
"somersault", "somewhere", "son", "song", "songbird", "sonnet", "soot", "soprano", "sorbet", "sort", "soulmate", "sound", "soup", "source", "sourwood", "sousaphone", "south", "south africa", "south america", "south korea", "sow", "soy", "soybean", "space", "spacing", "spade", "spaghetti", "spain", "spandex", "spank", "spark", "sparrow", "spasm", "speaker", "speakerphone", "spear", "special", "specialist", "specific", "spectacle", "spectacles", "spectrograph", "speech", "speedboat", "spend", "sphere", "sphynx", "spider", "spike", "spinach", "spine", "spiral", "spirit", "spiritual", "spite", "spleen", "split", "sponge", "spoon", "sport", "spot", "spotlight", "spray", "spread", "spring", "sprinter", "sprout", "spruce", "spume", "spur", "spy", "square", "squash", "squatter", "squeegee", "squid", "squirrel", "stable", "stack", "stacking", "stadium", "staff", "stag", "stage", "stain", "stair", "staircase", "stallion", "stamen", "stamina", "stamp", "stance", "standoff", "star", "start", "starter", "state", "statement", "station", "station-wagon", "statistic", "statistician", "steak", "steal", "steam", "steamroller", "steel", "steeple", "stem", "stencil", "step", "step-aunt", "step-brother", "stepdaughter", "step-daughter", "step-father", "step-grandfather", "step-grandmother", "stepmother", "step-mother", "stepping-stone", "steps", "step-sister", "stepson", "step-son", "step-uncle", "steven", "stew", "stick", "stiletto", "still", "stinger", "stitch", "stock", "stocking", "stockings", "stock-in-trade", "stole", "stomach", "stone", "stonework", "stool", "stop", "stopsign", "stopwatch", "storage", "store", "storey", "storm", "story", "storyboard", "story-telling", "stove", "strait", "stranger", "strap", "strategy", "straw", "strawberry", "stream", "street", "streetcar", "stress", "stretch", "strike", "string", "strip", "structure", "struggle", "stud", "student", "studio", "study", "stuff", "stumbling", "sturgeon", "style", "styling", "stylus", "subcomponent", "subconscious", "submarine", "subroutine", "subsidence", "substance", "suburb", "subway", "success", "suck", "sudan", "suede", "suffocation", "sugar", "suggestion", "suit", "suitcase", "sultan", "summer", "sun", "sunbeam", "sunbonnet", "sunday", "sundial", "sunflower", "sunglasses", "sunlamp", "sunroom", "sunshine", "supermarket", "supply", "support", "supporter", "suppression", "surface", "surfboard", "surgeon", "surgery", "surname", "surprise", "susan", "sushi", "suspect", "suspenders", "sustainment", "SUV", "swallow", "swamp", "swan", "swath", "sweat", "sweater", "sweats", "sweatshirt", "sweatshop", "sweatsuit", "swedish", "sweets", "swell", "swim", "swimming", "swimsuit", "swing", "swiss", "switch", "switchboard", "swivel", "sword", "swordfish", "sycamore", "sympathy", "syndicate", "synergy", "synod", "syria", "syrup", "system", "tabby", "tabernacle", "table", "tablecloth", "tabletop", "tachometer", "tackle", "tadpole", "tail", "tailor", "tailspin", "taiwan", "tale", "talk", "tam", "tambour", "tambourine", "tam-o'-shanter", "tandem", "tangerine", "tank", "tanker", "tankful", "tank-top", "tanzania", "tap", "target", "tassel", "taste", "tatami", "tattler", "tattoo", "taurus", "tavern", "tax", "taxi", "taxicab", "tea", "teacher", "teaching", "team", "tear", "technician", "technologist", "technology", "teen", "teeth", "telephone", "telescreen", "teletype", "television", "teller", "temp", "temper", "temperature", "temple", "tempo", "temporariness", "temptress", "tendency", "tenement", "tennis", "tenor", "tension", "tent", "tepee", "term", 
"terracotta", "terrapin", "territory", "test", "text", "textbook", "texture", "thailand", "thanks", "thaw", "theater", "theism", "theme", "theoretician", "theory", "therapist", "thermals", "thermometer", "thigh", "thing", "thinking", "thistle", "thomas", "thong", "thongs", "thorn", "thought", "thread", "thrill", "throat", "throne", "thrush", "thumb", "thunder", "thunderbolt", "thunderhead", "thunderstorm", "thursday", "tiara", "tic", "ticket", "tie", "tiger", "tight", "tights", "tile", "till", "timbale", "time", "timeline", "timeout", "timer", "timpani", "tin", "tinderbox", "tinkle", "tintype", "tip", "tire", "tissue", "titanium", "title", "toad", "toast", "toe", "toenail", "toga", "togs", "toilet", "tom", "tomato", "tomography", "tomorrow", "tom-tom", "ton", "tongue", "toot", "tooth", "toothbrush", "toothpaste", "toothpick", "top", "top-hat", "topic", "topsail", "toque", "torchiere", "toreador", "tornado", "torso", "tortellini", "tortoise", "tosser", "total", "tote", "touch", "tough", "tough-guy", "tour", "tourist", "towel", "tower", "town", "townhouse", "tow-truck", "toy", "trachoma", "track", "tracksuit", "tractor", "trade", "tradition", "traditionalism", "traffic", "trail", "trailer", "train", "trainer", "training", "tram", "tramp", "transaction", "translation", "transmission", "transom", "transport", "transportation", "trapdoor", "trapezium", "trapezoid", "trash", "travel", "tray", "treatment", "tree", "trellis", "tremor", "trench", "trial", "triangle", "tribe", "trick", "trigonometry", "trim", "trinket", "trip", "tripod", "trolley", "trombone", "trooper", "trouble", "trousers", "trout", "trove", "trowel", "truck", "truckit", "trumpet", "trunk", "trust", "truth", "try", "t-shirt", "tsunami", "tub", "tuba", "tube", "tuesday", "tugboat", "tulip", "tummy", "tuna", "tune", "tune-up", "tunic", "tunnel", "turban", "turkey", "turkish", "turn", "turnip", "turnover", "turnstile", "turret", "turtle", "tussle", "tutu", "tuxedo", "tv", "twig", "twilight", "twine", "twist", "twister", "two", "typewriter", "typhoon", "tyvek", "uganda", "ukraine", "ukulele", "umbrella", "unblinking", "uncle", "underclothes", "underground", "underneath", "underpants", "underpass", "undershirt", "understanding", "underwear", "underwire", "unibody", "uniform", "union", "unit", "united kingdom", "university", "urn", "use", "user", "usher", "utensil", "uzbekistan", "vacation", "vacuum", "vagrant", "valance", "valley", "valuable", "value", "van", "vane", "vanity", "variation", "variety", "vase", "vast", "vault", "vaulting", "veal", "vegetable", "vegetarian", "vehicle", "veil", "vein", "veldt", "vellum", "velodrome", "velvet", "venezuela", "venezuelan", "venom", "veranda", "verdict", "vermicelli", "verse", "version", "vertigo", "verve", "vessel", "vest", "vestment", "vibe", "vibraphone", "vibration", "video", "vietnam", "view", "villa", "village", "vineyard", "vinyl", "viola", "violet", "violin", "virginal", "virgo", "virtue", "virus", "viscose", "vise", "vision", "visit", "visitor", "visor", "vixen", "voice", "volcano", "volleyball", "volume", "voyage", "vulture", "wad", "wafer", "waffle", "waist", "waistband", "waiter", "waitress", "walk", "walker", "walkway", "wall", "wallaby", "wallet", "walnut", "walrus", "wampum", "wannabe", "war", "warden", "warlock", "warm-up", "warning", "wash", "washbasin", "washcloth", "washer", "washtub", "wasp", "waste", "wastebasket", "watch", "watchmaker", "water", "waterbed", "waterfall", "waterskiing", "waterspout", "wave", "wax", "way", "weakness", "wealth", "weapon", "weasel", "weather", 
"web", "wedding", "wedge", "wednesday", "weed", "weeder", "weedkiller", "week", "weekend", "weekender", "weight", "weird", "well", "west", "western", "wet-bar", "wetsuit", "whale", "wharf", "wheel", "whip", "whirlpool", "whirlwind", "whisker", "whiskey", "whistle", "white", "whole", "wholesale", "wholesaler", "whorl", "wife", "wilderness", "will", "william", "willow", "wind", "windage", "wind-chime", "window", "windscreen", "windshield", "wine", "wing", "wingman", "wingtip", "winner", "winter", "wire", "wiseguy", "wish", "wisteria", "witch", "witch-hunt", "withdrawal", "witness", "wolf", "woman", "wombat", "women", "wood", "woodland", "woodshed", "woodwind", "wool", "woolen", "word", "work", "workbench", "worker", "workhorse", "worklife", "workshop", "world", "worm", "worthy", "wound", "wrap", "wraparound", "wrecker", "wren", "wrench", "wrestler", "wrinkle", "wrist", "writer", "writing", "wrong", "xylophone", "yacht", "yak", "yam", "yard", "yarmulke", "yarn", "yawl", "year", "yellow", "yesterday", "yew", "yin", "yogurt", "yoke", "young", "youth", "yurt", "zampone", "zebra", "zebrafish", "zephyr", "ziggurat", "zinc", "zipper", "zither", "zone", "zoo", "zoologist", "zoology", "zoot-suit", "zucchinis"
]
| true
| true
|
790c42a7e25133236c3a70d8809f641e2b627c54
| 4,973
|
py
|
Python
|
eljur.py
|
yakuri354/EljurCLI
|
3ebf47c28b4c81a324f15d59bf5a6c49bc259dd6
|
[
"MIT"
] | null | null | null |
eljur.py
|
yakuri354/EljurCLI
|
3ebf47c28b4c81a324f15d59bf5a6c49bc259dd6
|
[
"MIT"
] | null | null | null |
eljur.py
|
yakuri354/EljurCLI
|
3ebf47c28b4c81a324f15d59bf5a6c49bc259dd6
|
[
"MIT"
] | null | null | null |
from colored import fg, stylize, attr
import requests as rq
from yaspin import yaspin
version = "0.4beta"
greeting = stylize("""
╭────────────────────────────────────────────────────────────────╮
│ Добро пожаловать в │
│ _____ _ _ ____ _ ___ │
│ | ____| |(_)_ _ _ __ / ___| | |_ _| │
│ | _| | || | | | | '__| | | | | | │
│ | |___| || | |_| | | | |___| |___ | | │
│ |_____|_|/ |\__,_|_| \____|_____|___| │
│ |__/ │
│ вер. 0.6.1beta │
╰────────────────────────────────────────────────────────────────╯
""", fg("magenta"), attr("bold"))
API_URL = "https://markbook.eljur.ru/apiv3/"
DEVKEY = "9235e26e80ac2c509c48fe62db23642c"
VENDOR = "markbook"
lessons = []
time_style = fg("green") + attr("bold")
room_style = fg("yellow") + attr("bold")
day_of_week_style = fg("orange_1") + attr("bold")
non_academ_style = fg("cyan")
separator_style = fg("medium_purple_1") + attr("bold")
separator = stylize("::", separator_style)
# yakuri354 - for marking the time slots of free periods ("windows")
# butukay - I'd call this a kludge } < can't delete it
# yakuri354 ~> well, I agree, but how else would we display the windows
lessons_time = {
"1": "08:30:00_09:10:00",
"2": "09:30:00_10:10:00",
"3": "10:20:00_11:00:00",
"4": "11:10:00_11:50:00",
"5": "12:00:00_12:40:00",
"6": "13:30:00_14:10:00",
"7": "14:20:00_15:00:00",
"8": "15:10:00_15:50:00",
"9": "16:20:00_17:00:00",
"10": "17:10:00_17:50:00",
"11": "18:00:00_18:40:00"
}
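# A minimal sketch (our addition, not in the original file) showing how the
# "start_end" strings above can be unpacked; the helper name `lesson_window`
# is hypothetical:
def lesson_window(number):
    start, end = lessons_time[str(number)].split("_")
    return start, end
# e.g. lesson_window(3) -> ("10:20:00", "11:00:00")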
# The student object
class Student:
def __init__(self, token=None, login=None):
self.token = token
self.login = login
rules_params = {
"DEVKEY": DEVKEY,
"vendor": VENDOR,
"out_format": "json",
"auth_token": self.token,
}
user_info = rq.get(API_URL + "getrules", params=rules_params).json()["response"]
        if user_info["error"]:
print("Ошибка при получении информации об ученике: " + user_info["error"])
raise LookupError(user_info["error"])
self.student_id = user_info["result"]["name"]
self.name = user_info["result"]["relations"]["students"][self.student_id]["title"]
self.grade = user_info["result"]["relations"]["students"][self.student_id]["class"]
self.city = user_info["result"]["city"]
self.email = user_info["result"]["email"]
self.fullname = user_info["result"]["title"]
self.gender = user_info["result"]["gender"]
self.school = user_info["result"]["relations"]["schools"][0]["title"]
def __str__(self):
text = ""
text += "\nИмя: " + self.name
text += "\nКласс: " + str(self.grade)
text += "\nГород: " + self.city
text += "\nШкола: " + self.school
text += "\nПол: " + "Мужской" if self.gender == "male" else "Женский"
text += "\nЛогин: " + self.login
text += "\nЭл. Почта: " + self.email
return text
def get_schedule(self, date=None, silent=False):
load_spinner = None
if not silent:
load_spinner = yaspin(text="Загрузка...")
load_spinner.text = "[Получение дневника из журнала...]"
if date is None:
date = "20191118-20191124"
diary = rq.get(
API_URL + "getschedule",
params={
"devkey": DEVKEY,
"vendor": VENDOR,
"out_format": "json",
"student": self.student_id,
"auth_token": self.token,
"days": date,
"rings": "true"
}
).json()['response']
if diary["error"] is not None:
if not silent:
load_spinner.text = ""
load_spinner.fail(stylize("Ошибка получения расписания: " + diary["error"], fg("red")))
raise LookupError(diary["error"])
schedule = diary['result']['students'][str(self.student_id)]
if not silent:
load_spinner.text = ""
load_spinner.ok(stylize("[Расписание успешно получено!] ", fg("green")))
return schedule
    # Fetch student info via the getrules request
def info(self, extended=False):
if not extended:
return self.student_id, self.name, self.grade
else:
return {
"student_id": self.student_id,
"fullname": self.name,
"grade": self.grade,
"city": self.city,
"email": self.email,
"gender": self.gender,
"school": self.school
}
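# Usage sketch (illustrative only; the token below is a placeholder --
# real tokens come from Eljur's auth endpoint, which this file does not show):
#   student = Student(token="<auth_token>", login="ivanov")
#   print(student)
#   schedule = student.get_schedule(date="20191118-20191124")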
| 32.503268
| 103
| 0.48703
|
from colored import fg, stylize, attr
import requests as rq
from yaspin import yaspin
version = "0.4beta"
greeting = stylize("""
╭────────────────────────────────────────────────────────────────╮
│ Добро пожаловать в │
│ _____ _ _ ____ _ ___ │
│ | ____| |(_)_ _ _ __ / ___| | |_ _| │
│ | _| | || | | | | '__| | | | | | │
│ | |___| || | |_| | | | |___| |___ | | │
│ |_____|_|/ |\__,_|_| \____|_____|___| │
│ |__/ │
│ вер. 0.6.1beta │
╰────────────────────────────────────────────────────────────────╯
""", fg("magenta"), attr("bold"))
API_URL = "https://markbook.eljur.ru/apiv3/"
DEVKEY = "9235e26e80ac2c509c48fe62db23642c"
VENDOR = "markbook"
lessons = []
time_style = fg("green") + attr("bold")
room_style = fg("yellow") + attr("bold")
day_of_week_style = fg("orange_1") + attr("bold")
non_academ_style = fg("cyan")
separator_style = fg("medium_purple_1") + attr("bold")
separator = stylize("::", separator_style)
# yakuri354 - for marking the time slots of free periods ("windows")
# butukay - I'd call this a kludge } < can't delete it
# yakuri354 ~> well, I agree, but how else would we display the windows
lessons_time = {
"1": "08:30:00_09:10:00",
"2": "09:30:00_10:10:00",
"3": "10:20:00_11:00:00",
"4": "11:10:00_11:50:00",
"5": "12:00:00_12:40:00",
"6": "13:30:00_14:10:00",
"7": "14:20:00_15:00:00",
"8": "15:10:00_15:50:00",
"9": "16:20:00_17:00:00",
"10": "17:10:00_17:50:00",
"11": "18:00:00_18:40:00"
}
# The student object
class Student:
def __init__(self, token=None, login=None):
self.token = token
self.login = login
rules_params = {
"DEVKEY": DEVKEY,
"vendor": VENDOR,
"out_format": "json",
"auth_token": self.token,
}
user_info = rq.get(API_URL + "getrules", params=rules_params).json()["response"]
        if user_info["error"]:
print("Ошибка при получении информации об ученике: " + user_info["error"])
raise LookupError(user_info["error"])
self.student_id = user_info["result"]["name"]
self.name = user_info["result"]["relations"]["students"][self.student_id]["title"]
self.grade = user_info["result"]["relations"]["students"][self.student_id]["class"]
self.city = user_info["result"]["city"]
self.email = user_info["result"]["email"]
self.fullname = user_info["result"]["title"]
self.gender = user_info["result"]["gender"]
self.school = user_info["result"]["relations"]["schools"][0]["title"]
def __str__(self):
text = ""
text += "\nИмя: " + self.name
text += "\nКласс: " + str(self.grade)
text += "\nГород: " + self.city
text += "\nШкола: " + self.school
text += "\nПол: " + "Мужской" if self.gender == "male" else "Женский"
text += "\nЛогин: " + self.login
text += "\nЭл. Почта: " + self.email
return text
def get_schedule(self, date=None, silent=False):
load_spinner = None
if not silent:
load_spinner = yaspin(text="Загрузка...")
load_spinner.text = "[Получение дневника из журнала...]"
if date is None:
date = "20191118-20191124"
diary = rq.get(
API_URL + "getschedule",
params={
"devkey": DEVKEY,
"vendor": VENDOR,
"out_format": "json",
"student": self.student_id,
"auth_token": self.token,
"days": date,
"rings": "true"
}
).json()['response']
if diary["error"] is not None:
if not silent:
load_spinner.text = ""
load_spinner.fail(stylize("Ошибка получения расписания: " + diary["error"], fg("red")))
raise LookupError(diary["error"])
schedule = diary['result']['students'][str(self.student_id)]
if not silent:
load_spinner.text = ""
load_spinner.ok(stylize("[Расписание успешно получено!] ", fg("green")))
return schedule
    # Fetch student info via the getrules request
def info(self, extended=False):
if not extended:
return self.student_id, self.name, self.grade
else:
return {
"student_id": self.student_id,
"fullname": self.name,
"grade": self.grade,
"city": self.city,
"email": self.email,
"gender": self.gender,
"school": self.school
}
| true
| true
|
790c43a7702a86adff91d0b3810f3c9d5a0a635d
| 3,313
|
py
|
Python
|
ga4stpg/edgeset/mutate.py
|
GiliardGodoi/steiner-problem-with-evol
|
6b34f0342b791ae6c65b6d016c37a4d45ab5cdad
|
[
"MIT"
] | null | null | null |
ga4stpg/edgeset/mutate.py
|
GiliardGodoi/steiner-problem-with-evol
|
6b34f0342b791ae6c65b6d016c37a4d45ab5cdad
|
[
"MIT"
] | 5
|
2021-01-26T17:28:32.000Z
|
2021-03-14T13:46:48.000Z
|
ga4stpg/edgeset/mutate.py
|
GiliardGodoi/steiner-problem-with-evol
|
6b34f0342b791ae6c65b6d016c37a4d45ab5cdad
|
[
"MIT"
] | 1
|
2021-01-25T16:35:59.000Z
|
2021-01-25T16:35:59.000Z
|
from random import choice, randint, sample, shuffle
from ga4stpg import graph
from ga4stpg.edgeset import EdgeSet
from ga4stpg.graph import UGraph
from ga4stpg.graph.disjointsets import DisjointSets
from ga4stpg.graph.priorityqueue import PriorityQueue
class MutatitionReplaceByLowerEdge:
def __init__(self, stpg):
self.stpg = stpg
def __call__(self, chromosome : EdgeSet):
assert isinstance(chromosome, EdgeSet), f'Chromosome must be EdgeSet type: Given was <{type(chromosome)}>'
graph = self.stpg.graph
disjoints = DisjointSets()
candidates = PriorityQueue()
result = EdgeSet()
for v in chromosome.vertices:
disjoints.make_set(v)
        index = randint(0, len(chromosome) - 1)
for i, edge in enumerate(chromosome):
u, v = edge
if i == index:
candidates.push(graph.weight(u,v), (u,v))
else:
disjoints.union(u, v)
result.add(u, v)
components = disjoints.get_disjoint_sets()
lesser_idx = min(components, key=lambda item: len(components[item]))
keys = components.keys() - set([lesser_idx])
for key in keys:
for v in components[lesser_idx]:
for w in graph.adjacent_to(v):
if w in components[key]:
candidates.push(graph.weight(w, v), (v, w))
        while len(disjoints.get_disjoint_sets()) >= 2 and candidates:
w, v = candidates.pop()
if disjoints.find(w) != disjoints.find(v):
result.add(w, v)
disjoints.union(w,v)
return result
class MutationReplaceByRandomEdge:
def __init__(self, stpg) -> None:
self.stpg = stpg
def __call__(self, chromosome : EdgeSet):
assert isinstance(chromosome, EdgeSet), f'Chromosome must be EdgeSet type: Given was <{type(chromosome)}>'
graph = self.stpg.graph
disjoints = DisjointSets()
result = EdgeSet()
for v in chromosome.vertices:
disjoints.make_set(v)
        index = randint(0, len(chromosome) - 1)
for i, edge in enumerate(chromosome):
if i != index :
v, u = edge
disjoints.union(v, u)
result.add(v, u)
components = disjoints.get_disjoint_sets()
lesser_idx = min(components, key=lambda item: len(components[item]))
lesser = components[lesser_idx]
keys = components.keys() - set([lesser_idx])
for key in keys:
candidates = list()
greater_component = components[key]
for v in lesser:
for w in graph.adjacent_to(v):
if w in greater_component:
candidates.append((v, w))
while candidates:
shuffle(candidates)
v, w = candidates.pop()
if disjoints.find(v) != disjoints.find(w):
result.add(v, w)
disjoints.union(v, w)
break
# if len(disjoints.get_disjoint_sets()) >= 2:
# result.add(selected_edge)
return result
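# Usage sketch (hypothetical objects; assumes an `stpg` instance whose
# `.graph` is a UGraph and a chromosome encoded as an EdgeSet over it):
#   mutate = MutationReplaceByRandomEdge(stpg)
#   offspring = mutate(parent)  # EdgeSet with one edge swapped and reconnected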
| 32.480392
| 115
| 0.55086
|
from random import choice, randint, sample, shuffle
from ga4stpg import graph
from ga4stpg.edgeset import EdgeSet
from ga4stpg.graph import UGraph
from ga4stpg.graph.disjointsets import DisjointSets
from ga4stpg.graph.priorityqueue import PriorityQueue
class MutatitionReplaceByLowerEdge:
def __init__(self, stpg):
self.stpg = stpg
def __call__(self, chromosome : EdgeSet):
assert isinstance(chromosome, EdgeSet), f'Chromosome must be EdgeSet type: Given was <{type(chromosome)}>'
graph = self.stpg.graph
disjoints = DisjointSets()
candidates = PriorityQueue()
result = EdgeSet()
for v in chromosome.vertices:
disjoints.make_set(v)
        index = randint(0, len(chromosome) - 1)
for i, edge in enumerate(chromosome):
u, v = edge
if i == index:
candidates.push(graph.weight(u,v), (u,v))
else:
disjoints.union(u, v)
result.add(u, v)
components = disjoints.get_disjoint_sets()
lesser_idx = min(components, key=lambda item: len(components[item]))
keys = components.keys() - set([lesser_idx])
for key in keys:
for v in components[lesser_idx]:
for w in graph.adjacent_to(v):
if w in components[key]:
candidates.push(graph.weight(w, v), (v, w))
        while len(disjoints.get_disjoint_sets()) >= 2 and candidates:
w, v = candidates.pop()
if disjoints.find(w) != disjoints.find(v):
result.add(w, v)
disjoints.union(w,v)
return result
class MutationReplaceByRandomEdge:
def __init__(self, stpg) -> None:
self.stpg = stpg
def __call__(self, chromosome : EdgeSet):
assert isinstance(chromosome, EdgeSet), f'Chromosome must be EdgeSet type: Given was <{type(chromosome)}>'
graph = self.stpg.graph
disjoints = DisjointSets()
result = EdgeSet()
for v in chromosome.vertices:
disjoints.make_set(v)
        index = randint(0, len(chromosome) - 1)
for i, edge in enumerate(chromosome):
if i != index :
v, u = edge
disjoints.union(v, u)
result.add(v, u)
components = disjoints.get_disjoint_sets()
lesser_idx = min(components, key=lambda item: len(components[item]))
lesser = components[lesser_idx]
keys = components.keys() - set([lesser_idx])
for key in keys:
candidates = list()
greater_component = components[key]
for v in lesser:
for w in graph.adjacent_to(v):
if w in greater_component:
candidates.append((v, w))
while candidates:
shuffle(candidates)
v, w = candidates.pop()
if disjoints.find(v) != disjoints.find(w):
result.add(v, w)
disjoints.union(v, w)
break
return result
| true
| true
|
790c457d923615e78d6f99e149e0ed18ab4b0fed
| 1,782
|
py
|
Python
|
setup.py
|
scotthavens/pysnobal
|
9cff1e6cb2f1da4240132af4e1d2f5740092d2ef
|
[
"CC0-1.0"
] | 1
|
2022-01-26T16:47:44.000Z
|
2022-01-26T16:47:44.000Z
|
setup.py
|
scotthavens/pysnobal
|
9cff1e6cb2f1da4240132af4e1d2f5740092d2ef
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
scotthavens/pysnobal
|
9cff1e6cb2f1da4240132af4e1d2f5740092d2ef
|
[
"CC0-1.0"
] | 1
|
2022-02-08T22:31:27.000Z
|
2022-02-08T22:31:27.000Z
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
with open('requirements.txt') as requirements_file:
requirements = requirements_file.read()
setup_requirements = ['setuptools_scm', ]
test_requirements = ['pytest>=3', 'pytest-runner']
setup(
author="USDA ARS Northwest Watershed Research Center",
author_email='snow@ars.usda.gov',
python_requires='>=3.6',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Take 2 for pysnobal in pure python",
entry_points={
'console_scripts': [
'pysnobal=pysnobal.cli:main',
],
},
install_requires=requirements,
license="CC0 1.0",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords='pysnobal',
name='pysnobal',
packages=find_packages(include=['pysnobal', 'pysnobal.*']),
package_data={
'pysnobal': [
'./pysnobal_core_config.ini'
]
},
use_scm_version={
'local_scheme': 'node-and-date',
},
setup_requires=setup_requirements,
test_suite='pysnobal.tests',
tests_require=test_requirements,
url='https://github.com/scotthavens/pysnobal',
zip_safe=False,
)
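# Typical local install for a layout like this (illustrative, not from the
# file itself):
#   pip install -e .
# With use_scm_version enabled, setuptools_scm derives the package version
# from git tags at build time rather than from a hard-coded string.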
| 28.741935
| 74
| 0.645903
|
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
with open('requirements.txt') as requirements_file:
requirements = requirements_file.read()
setup_requirements = ['setuptools_scm', ]
test_requirements = ['pytest>=3', 'pytest-runner']
setup(
author="USDA ARS Northwest Watershed Research Center",
author_email='snow@ars.usda.gov',
python_requires='>=3.6',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Take 2 for pysnobal in pure python",
entry_points={
'console_scripts': [
'pysnobal=pysnobal.cli:main',
],
},
install_requires=requirements,
license="CC0 1.0",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords='pysnobal',
name='pysnobal',
packages=find_packages(include=['pysnobal', 'pysnobal.*']),
package_data={
'pysnobal': [
'./pysnobal_core_config.ini'
]
},
use_scm_version={
'local_scheme': 'node-and-date',
},
setup_requires=setup_requirements,
test_suite='pysnobal.tests',
tests_require=test_requirements,
url='https://github.com/scotthavens/pysnobal',
zip_safe=False,
)
| true
| true
|
790c464155b4c53e05e220db0e61d7c6e7d07ff6
| 1,081
|
py
|
Python
|
08-def-type-hints/charindex.py
|
hdcpereira/example-code-2e
|
ade7558007f149e5ab7465dd9618d432f169eb9f
|
[
"MIT"
] | null | null | null |
08-def-type-hints/charindex.py
|
hdcpereira/example-code-2e
|
ade7558007f149e5ab7465dd9618d432f169eb9f
|
[
"MIT"
] | null | null | null |
08-def-type-hints/charindex.py
|
hdcpereira/example-code-2e
|
ade7558007f149e5ab7465dd9618d432f169eb9f
|
[
"MIT"
] | null | null | null |
"""
``name_index`` builds an inverted index mapping words to sets of Unicode
characters which contain that word in their names. For example::
>>> index = name_index(32, 65)
>>> sorted(index['SIGN'])
['#', '$', '%', '+', '<', '=', '>']
>>> sorted(index['DIGIT'])
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
>>> index['DIGIT'] & index['EIGHT']
{'8'}
"""
# tag::CHARINDEX[]
import sys
import re
import unicodedata
from typing import Dict, Set, Iterator
RE_WORD = re.compile(r'\w+')
STOP_CODE = sys.maxunicode + 1
def tokenize(text: str) -> Iterator[str]: # <1>
"""return iterable of uppercased words"""
for match in RE_WORD.finditer(text):
yield match.group().upper()
def name_index(start: int = 32, end: int = STOP_CODE) -> Dict[str, Set[str]]:
index: Dict[str, Set[str]] = {} # <2>
for char in (chr(i) for i in range(start, end)):
if name := unicodedata.name(char, ''): # <3>
for word in tokenize(name):
index.setdefault(word, set()).add(char)
return index
# end::CHARINDEX[]
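# A small runner (our addition, not in the original file) to exercise the
# doctests in the module docstring above:
if __name__ == '__main__':
    import doctest
    doctest.testmod()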
| 30.027778
| 77
| 0.572618
|
import sys
import re
import unicodedata
from typing import Dict, Set, Iterator
RE_WORD = re.compile(r'\w+')
STOP_CODE = sys.maxunicode + 1
def tokenize(text: str) -> Iterator[str]:
for match in RE_WORD.finditer(text):
yield match.group().upper()
def name_index(start: int = 32, end: int = STOP_CODE) -> Dict[str, Set[str]]:
index: Dict[str, Set[str]] = {}
for char in (chr(i) for i in range(start, end)):
if name := unicodedata.name(char, ''):
for word in tokenize(name):
index.setdefault(word, set()).add(char)
return index
| true
| true
|
790c49e7e2bd1c9c458bd29b205c837703810620
| 89
|
py
|
Python
|
mini-scripts/Python_Datetime_day_number_of_year.txt.py
|
Web-Dev-Collaborative/PYTHON_PRAC
|
856f902fb43dcccae168d34ee6aacc02427a7ac6
|
[
"MIT"
] | 5
|
2021-06-02T23:44:25.000Z
|
2021-12-27T16:21:57.000Z
|
mini-scripts/Python_Datetime_day_number_of_year.txt.py
|
Web-Dev-Collaborative/PYTHON_PRAC
|
856f902fb43dcccae168d34ee6aacc02427a7ac6
|
[
"MIT"
] | 22
|
2021-05-31T01:33:25.000Z
|
2021-10-18T18:32:39.000Z
|
mini-scripts/Python_Datetime_day_number_of_year.txt.py
|
Web-Dev-Collaborative/PYTHON_PRAC
|
856f902fb43dcccae168d34ee6aacc02427a7ac6
|
[
"MIT"
] | 3
|
2021-06-19T03:37:47.000Z
|
2021-08-31T00:49:51.000Z
|
import datetime
x = datetime.datetime.now()
print(x.strftime("%j"))
# Author: Bryan G
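# Example: for 10 Feb 2021, strftime("%j") yields the zero-padded
# day-of-year string "041"; an equivalent integer form is
# datetime.date(2021, 2, 10).timetuple().tm_yday  # -> 41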
| 12.714286
| 27
| 0.685393
|
import datetime
x = datetime.datetime.now()
print(x.strftime("%j"))
| true
| true
|
790c4a2dd6f4a1b8d8a6ad5c61ac80eebeaab8a8
| 63
|
py
|
Python
|
wunderkafka/producers/__init__.py
|
severstal-digital/wunderkafka
|
8c56fa4559a8576af7f005fd916bf97127576278
|
[
"Apache-2.0"
] | null | null | null |
wunderkafka/producers/__init__.py
|
severstal-digital/wunderkafka
|
8c56fa4559a8576af7f005fd916bf97127576278
|
[
"Apache-2.0"
] | null | null | null |
wunderkafka/producers/__init__.py
|
severstal-digital/wunderkafka
|
8c56fa4559a8576af7f005fd916bf97127576278
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains wunderkafka producer's boilerplate."""
| 31.5
| 62
| 0.777778
| true
| true
|
|
790c4c9fb5edb13bf70de321a0f4fc05978d80e0
| 13,229
|
py
|
Python
|
test/test_process.py
|
rockyplum/vampy-host
|
a410d680be2c15d76e31488db789ed30e6f34910
|
[
"BSD-4-Clause-UC"
] | 16
|
2016-11-19T07:24:54.000Z
|
2021-07-09T23:30:48.000Z
|
test/test_process.py
|
rockyplum/vampy-host
|
a410d680be2c15d76e31488db789ed30e6f34910
|
[
"BSD-4-Clause-UC"
] | 6
|
2017-04-05T12:00:38.000Z
|
2022-01-13T17:51:34.000Z
|
test/test_process.py
|
rockyplum/vampy-host
|
a410d680be2c15d76e31488db789ed30e6f34910
|
[
"BSD-4-Clause-UC"
] | 1
|
2017-04-03T16:33:51.000Z
|
2017-04-03T16:33:51.000Z
|
import vamp
import numpy as np
import vamp.frames as fr
plugin_key = "vamp-test-plugin:vamp-test-plugin"
plugin_key_freq = "vamp-test-plugin:vamp-test-plugin-freq"
rate = 44100
# Throughout this file we have the assumption that the plugin gets run with a
# blocksize of 1024, and with a step of 1024 for the time-domain version or 512
# for the frequency-domain one. That is certainly expected to be the norm for a
# plugin like this that declares no preference, and the Python Vamp module is
# expected to follow the norm.
blocksize = 1024
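# Consequently, the 10-block buffers used in most tests below yield 10
# features from the time-domain plugin (step 1024) and 20 from the
# frequency-domain one (step 512), which is what the length assertions check.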
def input_data(n):
# start at 1, not 0 so that all elts are non-zero
return np.arange(n) + 1
def test_process_n():
buf = input_data(blocksize)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary"))
assert len(results) == 1
def test_process_freq_n():
buf = input_data(blocksize)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-summary", {}))
assert len(results) == 2 # one complete block starting at zero, one half-full
def test_process_default_output():
# If no output is specified, we should get the first one (instants)
buf = input_data(blocksize)
results = list(vamp.process_audio(buf, rate, plugin_key, "", {}))
assert len(results) == 10
for i in range(len(results)):
expectedTime = vamp.vampyhost.RealTime('seconds', i * 1.5)
actualTime = results[i]["timestamp"]
assert expectedTime == actualTime
def test_process_summary_param():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary", { "produce_output": 0 }))
assert len(results) == 0
def test_process_multi_summary_param():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary" ], { "produce_output": 0 }))
assert len(results) == 0
def test_process_summary_param_bool():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary", { "produce_output": False }))
assert len(results) == 0
def test_process_multi_summary_param_bool():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary" ], { "produce_output": False }))
assert len(results) == 0
def test_process_summary():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary", {}))
assert len(results) == 10
for i in range(len(results)):
#
# each feature has a single value, equal to the number of non-zero elts
# in the input block (which is all of them, i.e. the blocksize) plus
# the first elt (which is i * blockSize + 1)
#
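        # e.g. for i = 0: 1024 + 0 + 1 = 1025; for i = 1: 1024 + 1024 + 1 = 2049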
expected = blocksize + i * blocksize + 1
actual = results[i]["values"][0]
assert actual == expected
def test_process_frames_summary():
buf = input_data(blocksize * 10)
ff = fr.frames_from_array(buf, blocksize, blocksize)
results = list(vamp.process_frames(ff, rate, blocksize, plugin_key, "input-summary", {}))
assert len(results) == 10
for i in range(len(results)):
#
# each feature has a single value, equal to the number of non-zero elts
# in the input block (which is all of them, i.e. the blocksize) plus
# the first elt (which is i * blockSize + 1)
#
expected = blocksize + i * blocksize + 1
actual = results[i]["values"][0]
assert actual == expected
def test_process_multi_summary():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary" ], {}))
assert len(results) == 10
for i in range(len(results)):
#
# each feature has a single value, equal to the number of non-zero elts
# in the input block (which is all of them, i.e. the blocksize) plus
# the first elt (which is i * blockSize + 1)
#
expected = blocksize + i * blocksize + 1
actual = results[i]["input-summary"]["values"][0]
assert actual == expected
def test_process_frames_multi_summary():
buf = input_data(blocksize * 10)
ff = fr.frames_from_array(buf, blocksize, blocksize)
results = list(vamp.process_frames_multiple_outputs(ff, rate, blocksize, plugin_key, [ "input-summary" ], {}))
assert len(results) == 10
for i in range(len(results)):
#
# each feature has a single value, equal to the number of non-zero elts
# in the input block (which is all of them, i.e. the blocksize) plus
# the first elt (which is i * blockSize + 1)
#
expected = blocksize + i * blocksize + 1
actual = results[i]["input-summary"]["values"][0]
assert actual == expected
def test_process_freq_summary():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-summary", {}))
assert len(results) == 20
for i in range(len(results)):
#
# sort of as above, but much much subtler:
#
# * the input block is converted to frequency domain but then converted
# back within the plugin, so the values being reported are time-domain
# ones but with windowing and FFT shift
#
# * the effect of FFT shift is that the first element in the
# re-converted frame is actually the one that was at the start of the
# second half of the original frame
#
# * and the last block is only half-full, so the "first" elt in that
# one, which actually comes from just after the middle of the block,
# will be zero
#
# * windowing does not affect the value of the first elt, because
# (before fft shift) it came from the peak of the window shape where
# the window value is 1
#
# * but windowing does affect the number of non-zero elts, because the
# asymmetric window used has one value very close to zero in it
#
# * the step size (the increment in input value from one block to the
# next) is only half the block size
#
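        # e.g. for i = 0: "first" elt = 512 + 1 = 513, plus 1023 non-zero
        # elts, so expected = 1536 (compared within eps below)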
expected = i * (blocksize/2) + blocksize/2 + 1 # "first" elt
if (i == len(results)-1):
expected = 0
expected = expected + blocksize - 1 # non-zero elts
actual = results[i]["values"][0]
eps = 1e-6
assert abs(actual - expected) < eps
def test_process_freq_summary_shift():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-summary", {}, process_timestamp_method = vamp.vampyhost.SHIFT_DATA))
assert len(results) == 20
for i in range(len(results)):
# as test_process_freq_summary, except that the input is effectively
# padded by the adapter with an additional half-blocksize of zeros
# before conversion
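        # e.g. i = 1 reduces to the unshifted i = 0 case: 513 + 1023 = 1536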
if i == 0:
# this block doesn't interact at all well with our test, we get
# spurious low values in the block converted back within the plugin
# because of the big discontinuity & window ripple after fftshift
pass
else:
expected = (i-1) * (blocksize/2) + blocksize/2 + 1 # for "first" elt
expected = expected + blocksize - 1 # non-zero elts
actual = results[i]["values"][0]
eps = 1e-6
assert abs(actual - expected) < eps
def test_process_multi_freq_summary():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key_freq, [ "input-summary" ], {}))
assert len(results) == 20
for i in range(len(results)):
expected = i * (blocksize/2) + blocksize/2 + 1 # "first" elt
if (i == len(results)-1):
expected = 0
expected = expected + blocksize - 1 # non-zero elts
actual = results[i]["input-summary"]["values"][0]
eps = 1e-6
assert abs(actual - expected) < eps
def test_process_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}))
assert len(results) == 10
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = i * blocksize
actual = results[i]["values"][0]
assert actual == expected
def test_process_multi_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-timestamp" ]))
assert len(results) == 10
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = i * blocksize
actual = results[i]["input-timestamp"]["values"][0]
assert actual == expected
def test_process_freq_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-timestamp", {}))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the frame just beyond
# half-way through the input buffer
expected = i * (blocksize/2) + blocksize/2
actual = results[i]["values"][0]
if actual == 2047 and expected == 2048:
print("This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.")
assert actual == expected
def test_process_freq_shift_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-timestamp", process_timestamp_method = vamp.vampyhost.SHIFT_DATA))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the frame at the start of
# the input buffer
expected = i * (blocksize/2)
actual = results[i]["values"][0]
if actual == 2047 and expected == 2048:
print("This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.")
assert actual == expected
def test_process_multi_freq_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key_freq, [ "input-timestamp" ], {}))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the frame just beyond
# half-way through the input buffer
expected = i * (blocksize/2) + blocksize/2
actual = results[i]["input-timestamp"]["values"][0]
if actual == 2047 and expected == 2048:
print("This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.")
assert actual == expected
def test_process_blocksize_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}, block_size = blocksize * 2)) # step size defaults to block size
assert len(results) == 5
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = i * blocksize * 2
actual = results[i]["values"][0]
assert actual == expected
def test_process_stepsize_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}, step_size = int(blocksize / 2)))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = (i * blocksize) / 2
actual = results[i]["values"][0]
assert actual == expected
def test_process_stepsize_blocksize_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}, block_size = blocksize * 2, step_size = int(blocksize / 2)))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = (i * blocksize) / 2
actual = results[i]["values"][0]
assert actual == expected
def test_process_multiple_outputs():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary", "input-timestamp" ], {}))
assert len(results) == 20
si = 0
ti = 0
for r in results:
assert "input-summary" in r or "input-timestamp" in r
if "input-summary" in r:
expected = blocksize + si * blocksize + 1
actual = r["input-summary"]["values"][0]
assert actual == expected
si = si + 1
if "input-timestamp" in r:
expected = ti * blocksize
actual = r["input-timestamp"]["values"][0]
assert actual == expected
ti = ti + 1
| 43.516447
| 147
| 0.642452
|
import vamp
import numpy as np
import vamp.frames as fr
plugin_key = "vamp-test-plugin:vamp-test-plugin"
plugin_key_freq = "vamp-test-plugin:vamp-test-plugin-freq"
rate = 44100
blocksize = 1024
def input_data(n):
return np.arange(n) + 1
def test_process_n():
buf = input_data(blocksize)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary"))
assert len(results) == 1
def test_process_freq_n():
buf = input_data(blocksize)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-summary", {}))
assert len(results) == 2
def test_process_default_output():
buf = input_data(blocksize)
results = list(vamp.process_audio(buf, rate, plugin_key, "", {}))
assert len(results) == 10
for i in range(len(results)):
expectedTime = vamp.vampyhost.RealTime('seconds', i * 1.5)
actualTime = results[i]["timestamp"]
assert expectedTime == actualTime
def test_process_summary_param():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary", { "produce_output": 0 }))
assert len(results) == 0
def test_process_multi_summary_param():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary" ], { "produce_output": 0 }))
assert len(results) == 0
def test_process_summary_param_bool():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary", { "produce_output": False }))
assert len(results) == 0
def test_process_multi_summary_param_bool():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary" ], { "produce_output": False }))
assert len(results) == 0
def test_process_summary():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary", {}))
assert len(results) == 10
for i in range(len(results)):
expected = blocksize + i * blocksize + 1
actual = results[i]["values"][0]
assert actual == expected
def test_process_frames_summary():
buf = input_data(blocksize * 10)
ff = fr.frames_from_array(buf, blocksize, blocksize)
results = list(vamp.process_frames(ff, rate, blocksize, plugin_key, "input-summary", {}))
assert len(results) == 10
for i in range(len(results)):
expected = blocksize + i * blocksize + 1
actual = results[i]["values"][0]
assert actual == expected
def test_process_multi_summary():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary" ], {}))
assert len(results) == 10
for i in range(len(results)):
expected = blocksize + i * blocksize + 1
actual = results[i]["input-summary"]["values"][0]
assert actual == expected
def test_process_frames_multi_summary():
buf = input_data(blocksize * 10)
ff = fr.frames_from_array(buf, blocksize, blocksize)
results = list(vamp.process_frames_multiple_outputs(ff, rate, blocksize, plugin_key, [ "input-summary" ], {}))
assert len(results) == 10
for i in range(len(results)):
expected = blocksize + i * blocksize + 1
actual = results[i]["input-summary"]["values"][0]
assert actual == expected
def test_process_freq_summary():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-summary", {}))
assert len(results) == 20
for i in range(len(results)):
expected = i * (blocksize/2) + blocksize/2 + 1
if (i == len(results)-1):
expected = 0
expected = expected + blocksize - 1
actual = results[i]["values"][0]
eps = 1e-6
assert abs(actual - expected) < eps
def test_process_freq_summary_shift():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-summary", {}, process_timestamp_method = vamp.vampyhost.SHIFT_DATA))
assert len(results) == 20
for i in range(len(results)):
if i == 0:
# spurious low values in the block converted back within the plugin
# because of the big discontinuity & window ripple after fftshift
pass
else:
expected = (i-1) * (blocksize/2) + blocksize/2 + 1 # for "first" elt
expected = expected + blocksize - 1 # non-zero elts
actual = results[i]["values"][0]
eps = 1e-6
assert abs(actual - expected) < eps
def test_process_multi_freq_summary():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key_freq, [ "input-summary" ], {}))
assert len(results) == 20
for i in range(len(results)):
expected = i * (blocksize/2) + blocksize/2 + 1 # "first" elt
if (i == len(results)-1):
expected = 0
expected = expected + blocksize - 1 # non-zero elts
actual = results[i]["input-summary"]["values"][0]
eps = 1e-6
assert abs(actual - expected) < eps
def test_process_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}))
assert len(results) == 10
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = i * blocksize
actual = results[i]["values"][0]
assert actual == expected
def test_process_multi_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-timestamp" ]))
assert len(results) == 10
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = i * blocksize
actual = results[i]["input-timestamp"]["values"][0]
assert actual == expected
def test_process_freq_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-timestamp", {}))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the frame just beyond
# half-way through the input buffer
expected = i * (blocksize/2) + blocksize/2
actual = results[i]["values"][0]
if actual == 2047 and expected == 2048:
print("This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.")
assert actual == expected
def test_process_freq_shift_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-timestamp", process_timestamp_method = vamp.vampyhost.SHIFT_DATA))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the frame at the start of
# the input buffer
expected = i * (blocksize/2)
actual = results[i]["values"][0]
if actual == 2047 and expected == 2048:
print("This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.")
assert actual == expected
def test_process_multi_freq_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key_freq, [ "input-timestamp" ], {}))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the frame just beyond
# half-way through the input buffer
expected = i * (blocksize/2) + blocksize/2
actual = results[i]["input-timestamp"]["values"][0]
if actual == 2047 and expected == 2048:
print("This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.")
assert actual == expected
def test_process_blocksize_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}, block_size = blocksize * 2)) # step size defaults to block size
assert len(results) == 5
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = i * blocksize * 2
actual = results[i]["values"][0]
assert actual == expected
def test_process_stepsize_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}, step_size = int(blocksize / 2)))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = (i * blocksize) / 2
actual = results[i]["values"][0]
assert actual == expected
def test_process_stepsize_blocksize_timestamps():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}, block_size = blocksize * 2, step_size = int(blocksize / 2)))
assert len(results) == 20
for i in range(len(results)):
# The timestamp should be the frame number of the first frame in the
# input buffer
expected = (i * blocksize) / 2
actual = results[i]["values"][0]
assert actual == expected
def test_process_multiple_outputs():
buf = input_data(blocksize * 10)
results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary", "input-timestamp" ], {}))
assert len(results) == 20
si = 0
ti = 0
for r in results:
assert "input-summary" in r or "input-timestamp" in r
if "input-summary" in r:
expected = blocksize + si * blocksize + 1
actual = r["input-summary"]["values"][0]
assert actual == expected
si = si + 1
if "input-timestamp" in r:
expected = ti * blocksize
actual = r["input-timestamp"]["values"][0]
assert actual == expected
ti = ti + 1
| true
| true
|