hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7041c9a2d72c9bd22b94e66ee3c74d5036d345c
| 867
|
py
|
Python
|
rgb_stacking/contrib/common.py
|
ava6969/rgb_stacking_extend
|
a36f1e35aa796e77201321161056e174966e7707
|
[
"Apache-2.0"
] | null | null | null |
rgb_stacking/contrib/common.py
|
ava6969/rgb_stacking_extend
|
a36f1e35aa796e77201321161056e174966e7707
|
[
"Apache-2.0"
] | null | null | null |
rgb_stacking/contrib/common.py
|
ava6969/rgb_stacking_extend
|
a36f1e35aa796e77201321161056e174966e7707
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
from rgb_stacking.utils.utils import init
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class Sum(nn.Module):
    """Reduce the input by summation along a fixed dimension."""

    def __init__(self, dim):
        super().__init__()
        # Dimension reduced in forward().
        self.dim = dim

    def forward(self, x):
        return x.sum(dim=self.dim)
class Mean(nn.Module):
    """Reduce the input by averaging along a fixed dimension."""

    def __init__(self, dim):
        super().__init__()
        # Dimension reduced in forward().
        self.dim = dim

    def forward(self, x):
        return x.mean(dim=self.dim)
def init_rec(rec):
    """Initialise a (recurrent) module's parameters in place.

    Biases are zeroed and weight matrices made orthogonal; the same module
    is returned so the call can be chained at the construction site.
    """
    for param_name, param in rec.named_parameters():
        if 'bias' in param_name:
            nn.init.constant_(param, 0)
        elif 'weight' in param_name:
            nn.init.orthogonal_(param)
    return rec
def init_(m):
    """Apply the project-wide `init` helper to a module: orthogonal weights,
    zero bias, and gain sqrt(2) (the standard gain for ReLU layers)."""
    def _zero_bias(x):
        return nn.init.constant_(x, 0)
    return init(m, nn.init.orthogonal_, _zero_bias, np.sqrt(2))
| 19.704545
| 58
| 0.596309
|
import numpy as np
import torch
import torch.nn as nn
from rgb_stacking.utils.utils import init
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Sum(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
return torch.sum(x, self.dim)
class Mean(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
return torch.mean(x, self.dim)
def init_rec(rec):
for name, param in rec.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
nn.init.orthogonal_(param)
return rec
def init_(m):
return init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
| true
| true
|
f7041d491bfe36a21ac0fe91f27199165aa38729
| 1,226
|
py
|
Python
|
index_flask_qr/core/types23.py
|
lishnih/index_flask_qr
|
ac00346724d785d23a8991d760e831d89c746d2a
|
[
"MIT"
] | null | null | null |
index_flask_qr/core/types23.py
|
lishnih/index_flask_qr
|
ac00346724d785d23a8991d760e831d89c746d2a
|
[
"MIT"
] | null | null | null |
index_flask_qr/core/types23.py
|
lishnih/index_flask_qr
|
ac00346724d785d23a8991d760e831d89c746d2a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Stan 2018-08-04
# Python 2/3 compatibility shim.  Importing from this module gives the
# caller one consistent set of names -- aStr, cmp, b()/u(), unicode, and
# the *_types tuples -- whose definitions differ between major versions.
import sys
if sys.version_info >= (3,):
    # Mixin for classes that define __unicode__ (py2 style): route str()
    # through it.  On py3 str() already returns text.
    class aStr():
        def __str__(self):
            return self.__unicode__()
    # Python 3 removed the cmp() builtin; recreate its contract.
    def cmp(a, b):
        return (a > b) - (a < b)
    # range = range  (py3 builtin is already lazy)
    # Text <-> bytes helpers: py3 str is text, so a real codec is needed.
    def b(s):
        return s.encode('utf-8')
    def u(s):
        return s.decode('utf-8')
    # bytes = bytes
    unicode = str
    string_types = str,
    numeric_types = int, float, complex
    simple_types = int, float, complex, str, bytearray
    collections_types = list, tuple, set, frozenset
    all_types = (int, float, complex, str, bytearray,
                 list, tuple, set, frozenset, dict)
else:
    # Py2: str() must return encoded bytes, so encode the unicode form.
    class aStr():
        def __str__(self):
            return self.__unicode__().encode('utf-8')
    # cmp = cmp  (builtin exists on py2)
    range = xrange
    # Py2 str is already bytes; the helpers are identity functions.
    def b(s):
        return s
    def u(s):
        return s
    bytes = str
    # unicode = unicode  (builtin exists on py2)
    string_types = basestring,
    numeric_types = int, long, float, complex
    simple_types = int, long, float, complex, basestring, bytearray
    collections_types = list, tuple, set, frozenset
    all_types = (int, long, float, complex, basestring, bytearray,
                 list, tuple, set, frozenset, dict)
| 20.779661
| 67
| 0.584013
|
import sys
if sys.version_info >= (3,):
class aStr():
def __str__(self):
return self.__unicode__()
def cmp(a, b):
return (a > b) - (a < b)
def b(s):
return s.encode('utf-8')
def u(s):
return s.decode('utf-8')
unicode = str
string_types = str,
numeric_types = int, float, complex
simple_types = int, float, complex, str, bytearray
collections_types = list, tuple, set, frozenset
all_types = (int, float, complex, str, bytearray,
list, tuple, set, frozenset, dict)
else:
class aStr():
def __str__(self):
return self.__unicode__().encode('utf-8')
range = xrange
def b(s):
return s
def u(s):
return s
bytes = str
string_types = basestring,
numeric_types = int, long, float, complex
simple_types = int, long, float, complex, basestring, bytearray
collections_types = list, tuple, set, frozenset
all_types = (int, long, float, complex, basestring, bytearray,
list, tuple, set, frozenset, dict)
| true
| true
|
f7041d686ea10e1bb192185c43073045a408c440
| 552
|
py
|
Python
|
sum_even.py
|
Mr-Umidjon/even_and_odd_numbers
|
2ad28c671db64d474afaffc444a1e807a7b82be7
|
[
"MIT"
] | null | null | null |
sum_even.py
|
Mr-Umidjon/even_and_odd_numbers
|
2ad28c671db64d474afaffc444a1e807a7b82be7
|
[
"MIT"
] | null | null | null |
sum_even.py
|
Mr-Umidjon/even_and_odd_numbers
|
2ad28c671db64d474afaffc444a1e807a7b82be7
|
[
"MIT"
] | null | null | null |
# A four-digit integer is given. Find the sum of even digits in it.
# Digits are peeled off least-significant first with % 10 // 10; a digit
# contributes to the total only when it is even.
var_int = 1184
sum_even = 0
for _ in range(4):  # exactly four digits
    digit = var_int % 10
    var_int //= 10
    if digit % 2 == 0:
        sum_even += digit
print(sum_even)
| 20.444444
| 71
| 0.641304
|
var_int = 1184
sum_even = 0
x1 = var_int % 10
var_int //= 10
sum_even += (x1 + 1) % 2 * x1
x2 = var_int % 10
var_int //= 10
sum_even += (x2 + 1) % 2 * x2
x3 = var_int % 10
var_int //= 10
sum_even += (x3 + 1) % 2 * x3
x4 = var_int % 10
var_int //= 10
sum_even += (x4 + 1) % 2 * x4
print(sum_even)
| true
| true
|
f7041dd4ace8385d6825cd0952034069e9abc390
| 5,354
|
py
|
Python
|
permafrost/forms.py
|
renderbox/django-permafrost
|
a3858d248e4ee2abac55e3663c2da68b8a52cea6
|
[
"MIT"
] | 7
|
2020-06-01T21:00:45.000Z
|
2021-11-14T18:20:04.000Z
|
permafrost/forms.py
|
renderbox/django-permafrost
|
a3858d248e4ee2abac55e3663c2da68b8a52cea6
|
[
"MIT"
] | 11
|
2020-11-20T21:35:41.000Z
|
2022-02-01T16:49:03.000Z
|
permafrost/forms.py
|
renderbox/django-permafrost
|
a3858d248e4ee2abac55e3663c2da68b8a52cea6
|
[
"MIT"
] | 1
|
2020-11-20T21:26:00.000Z
|
2020-11-20T21:26:00.000Z
|
# Permafrost Forms
from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from django.forms.fields import CharField, ChoiceField, BooleanField
from django.forms.models import ModelMultipleChoiceField
from django.forms.widgets import CheckboxInput
from django.utils.translation import ugettext_lazy as _
from .models import PermafrostRole, get_optional_by_category, get_choices
# Select-widget choices: a blank "choose" placeholder followed by the
# role-type choices supplied by the models layer.
CHOICES = [('', _("Choose Role Type"))] + get_choices()
# Verbose field labels shared by the role forms below.
LABELS = {
    'name': _('Role Name'),
    'category': _('Role Type')
}
def assemble_optiongroups_for_widget(permissions):
    """Group permissions by their content type for a grouped select widget.

    Returns a list of [content_type_name, [(pk, name), ...]] pairs in
    first-seen order; a falsy `permissions` yields an empty list.
    """
    grouped = {}
    for permission in (permissions or []):
        type_name = permission.content_type.name
        grouped.setdefault(type_name, []).append((permission.pk, permission.name,))
    return [[model_name, options] for model_name, options in grouped.items()]
def bootstrappify(fields):
    """Append Bootstrap's `form-control` CSS class to every non-checkbox
    widget in a form's fields mapping (checkboxes are styled differently)."""
    for field_name in fields:
        widget = fields[field_name].widget
        if isinstance(widget, CheckboxInput):
            continue
        if 'class' in widget.attrs:
            widget.attrs['class'] = widget.attrs['class'] + " form-control"
        else:
            widget.attrs['class'] = 'form-control'
class SelectPermafrostRoleTypeForm(ModelForm):
    """First step of role creation: pick a role category.

    Name and description are optional here; only the category choice is
    enforced.  All widgets get Bootstrap styling.
    """

    name = CharField(required=False)
    description = CharField(required=False)
    category = ChoiceField(choices=CHOICES)

    class Meta:
        model = PermafrostRole
        fields = ('name', 'description', 'category',)
        labels = LABELS

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap classes to every non-checkbox widget.
        bootstrappify(self.fields)
class PermafrostRoleCreateForm(ModelForm):
    """Create a PermafrostRole bound to a site.

    The selectable permissions are narrowed to the *optional* permissions of
    the chosen category; the site comes from the `site` kwarg or defaults to
    the current Django site.
    """

    permissions = ModelMultipleChoiceField(queryset=Permission.objects.all(), required=False)

    class Meta:
        model = PermafrostRole
        fields = ('name', 'description', 'category', 'permissions')
        labels = LABELS

    def __init__(self, *args, **kwargs):
        # Pop `site` before the ModelForm sees the kwargs.
        self.site = kwargs.pop('site', Site.objects.get_current())
        super().__init__(*args, **kwargs)
        self.fields['category'].choices = CHOICES
        # Category may come from initial data, posted data, or the instance
        # (instance wins when it already has one).
        category = self.initial.get(
            'category',
            self.data.get('category', None)
        )
        if self.instance:
            category = self.instance.category if self.instance.category else category
        if category:
            # Narrow the permissions queryset to the category's optional set.
            all_optional_permissions = get_optional_by_category(category=category)
            ids = [perm.pk for perm in all_optional_permissions]
            self.fields['permissions'].queryset = Permission.objects.filter(id__in=ids)
        bootstrappify(self.fields)

    def save(self, commit=True):
        """Save the role on self.site and sync its optional permissions."""
        self.instance.site = self.site
        instance = super().save(commit)
        category = instance.category
        if 'permissions' in self.cleaned_data:
            perm_ids = []
            if category:
                perm_ids = self.cleaned_data['permissions']
            if perm_ids:
                instance.permissions_set(Permission.objects.filter(id__in=perm_ids))
            else:
                # No selection (or no category): strip all optional permissions.
                instance.permissions_clear()
        return instance

    def clean_name(self):
        """Enforce per-site uniqueness of the role name."""
        name = self.cleaned_data['name']
        name_exists = False
        if self.instance: ## on update check if name change exists
            if 'name' in self.changed_data:
                name_exists = PermafrostRole.objects.filter(
                    name=name,
                    site=self.site,
                ).exclude(pk=self.instance.pk).first()
        else:
            try:
                name_exists = PermafrostRole.objects.get(
                    name=name,
                    site=self.site
                )
            except PermafrostRole.DoesNotExist:
                pass
        if name_exists:
            raise ValidationError('Role with this name already exists')
        # Always return field
        return name
class PermafrostRoleUpdateForm(PermafrostRoleCreateForm):
    """
    Form used to display role detail
    Only allowed to edit optional permissions, name and description
    Category and required permissions stay locked
    """
    deleted = BooleanField(required=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Lock category both in the widget (readonly/disabled markup) and on
        # the field (disabled=True), so posted values cannot override it.
        self.fields['category'].widget.attrs.update({'readonly': True, 'disabled': True})
        self.fields['category'].disabled = True
        self.fields['category'].required = False
        self.fields['category'].choices = [choice for choice in CHOICES if choice[0] == self.instance.category]
        self.fields['category'].initial = self.instance.category
        ## limit choices to saved category
        self.fields['deleted'].initial = self.instance.deleted

    def save(self, commit=True):
        # NOTE(review): only a truthy 'deleted' is copied onto the instance,
        # so this form can soft-delete a role but never clear the flag --
        # confirm that un-deleting is intentionally unsupported here.
        if self.cleaned_data['deleted']:
            self.instance.deleted = self.cleaned_data['deleted']
        instance = super().save(commit)
        return instance
| 33.672956
| 111
| 0.621965
|
from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from django.forms.fields import CharField, ChoiceField, BooleanField
from django.forms.models import ModelMultipleChoiceField
from django.forms.widgets import CheckboxInput
from django.utils.translation import ugettext_lazy as _
from .models import PermafrostRole, get_optional_by_category, get_choices
CHOICES = [('', _("Choose Role Type"))] + get_choices()
LABELS = {
'name': _('Role Name'),
'category': _('Role Type')
}
def assemble_optiongroups_for_widget(permissions):
choices = []
optgroups = {}
if permissions:
for perm in permissions:
if perm.content_type.name in optgroups:
optgroups[perm.content_type.name].append((perm.pk, perm.name,))
else:
optgroups[perm.content_type.name] = [(perm.pk, perm.name,)]
for model_name, options in optgroups.items():
choices.append([model_name, options])
return choices
def bootstrappify(fields):
for field in fields:
widget = fields[field].widget
if not isinstance(widget, CheckboxInput):
if 'class' in widget.attrs:
widget.attrs['class'] = widget.attrs['class'] + " form-control"
else:
widget.attrs.update({'class':'form-control'})
class SelectPermafrostRoleTypeForm(ModelForm):
name = CharField(required=False)
description = CharField(required=False)
category = ChoiceField(choices=CHOICES)
class Meta:
model = PermafrostRole
fields = ('name', 'description', 'category',)
labels = LABELS
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
bootstrappify(self.fields)
class PermafrostRoleCreateForm(ModelForm):
permissions = ModelMultipleChoiceField(queryset=Permission.objects.all(), required=False)
class Meta:
model = PermafrostRole
fields = ('name', 'description', 'category', 'permissions')
labels = LABELS
def __init__(self, *args, **kwargs):
self.site = kwargs.pop('site', Site.objects.get_current())
super().__init__(*args, **kwargs)
self.fields['category'].choices = CHOICES
category = self.initial.get(
'category',
self.data.get('category', None)
)
if self.instance:
category = self.instance.category if self.instance.category else category
if category:
all_optional_permissions = get_optional_by_category(category=category)
ids = [perm.pk for perm in all_optional_permissions]
self.fields['permissions'].queryset = Permission.objects.filter(id__in=ids)
bootstrappify(self.fields)
def save(self, commit=True):
self.instance.site = self.site
instance = super().save(commit)
category = instance.category
if 'permissions' in self.cleaned_data:
perm_ids = []
if category:
perm_ids = self.cleaned_data['permissions']
if perm_ids:
instance.permissions_set(Permission.objects.filter(id__in=perm_ids))
else:
instance.permissions_clear()
return instance
def clean_name(self):
name = self.cleaned_data['name']
name_exists = False
if self.instance:
if 'name' in self.changed_data:
name_exists = PermafrostRole.objects.filter(
name=name,
site=self.site,
).exclude(pk=self.instance.pk).first()
else:
try:
name_exists = PermafrostRole.objects.get(
name=name,
site=self.site
)
except PermafrostRole.DoesNotExist:
pass
if name_exists:
raise ValidationError('Role with this name already exists')
return name
class PermafrostRoleUpdateForm(PermafrostRoleCreateForm):
deleted = BooleanField(required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['category'].widget.attrs.update({'readonly': True, 'disabled': True})
self.fields['category'].disabled = True
self.fields['category'].required = False
self.fields['category'].choices = [choice for choice in CHOICES if choice[0] == self.instance.category]
self.fields['category'].initial = self.instance.category
self.fields['deleted'].initial = self.instance.deleted
def save(self, commit=True):
if self.cleaned_data['deleted']:
self.instance.deleted = self.cleaned_data['deleted']
instance = super().save(commit)
return instance
| true
| true
|
f7041e9f4cda9730adee3c84e52ce84f4085adad
| 3,070
|
py
|
Python
|
qlib/contrib/data/highfreq_processor.py
|
SunsetWolf/qlib
|
89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5
|
[
"MIT"
] | 1
|
2021-12-14T13:48:38.000Z
|
2021-12-14T13:48:38.000Z
|
qlib/contrib/data/highfreq_processor.py
|
SunsetWolf/qlib
|
89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5
|
[
"MIT"
] | null | null | null |
qlib/contrib/data/highfreq_processor.py
|
SunsetWolf/qlib
|
89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
from qlib.data.dataset.processor import Processor
from qlib.data.dataset.utils import fetch_df_by_index
from typing import Dict
class HighFreqTrans(Processor):
    """Cast a feature frame to a compact dtype.

    "bool"-flagged data is stored as int8; anything else as float32.
    """

    def __init__(self, dtype: str = "bool"):
        self.dtype = dtype

    def fit(self, df_features):
        # Stateless transform; nothing to fit.
        pass

    def __call__(self, df_features):
        target = np.int8 if self.dtype == "bool" else np.float32
        return df_features.astype(target)
class HighFreqNorm(Processor):
    """Group-wise normalisation for high-frequency features.

    `fit()` computes and persists per-group mean/std under
    ``feature_save_dir`` (log1p-compressing ``*volume`` groups first);
    ``__call__`` applies the saved statistics.  ``norm_groups`` maps a group
    name to its number of *consecutive* feature columns.
    """

    def __init__(
        self,
        fit_start_time: pd.Timestamp,
        fit_end_time: pd.Timestamp,
        feature_save_dir: str,
        norm_groups: Dict[str, int],
    ):
        self.fit_start_time = fit_start_time
        self.fit_end_time = fit_end_time
        # NOTE(review): file paths below are built by plain concatenation, so
        # this is expected to end with a path separator -- confirm callers.
        self.feature_save_dir = feature_save_dir
        self.norm_groups = norm_groups

    def fit(self, df_features) -> None:
        """Compute and save per-group statistics over the fit time window."""
        # Treat a non-empty save directory as "already fitted".
        if os.path.exists(self.feature_save_dir) and len(os.listdir(self.feature_save_dir)) != 0:
            return
        # exist_ok: the guard above deliberately falls through when the
        # directory exists but is empty; plain makedirs would then raise.
        os.makedirs(self.feature_save_dir, exist_ok=True)
        fetch_df = fetch_df_by_index(df_features, slice(self.fit_start_time, self.fit_end_time), level="datetime")
        del df_features
        # Map each group name to its slice of consecutive columns.
        index = 0
        names = {}
        for name, dim in self.norm_groups.items():
            names[name] = slice(index, index + dim)
            index += dim
        for name, name_val in names.items():
            df_values = fetch_df.iloc(axis=1)[name_val].values
            if name.endswith("volume"):
                # Compress heavy-tailed volume features before normalising.
                df_values = np.log1p(df_values)
            self.feature_mean = np.nanmean(df_values)
            np.save(self.feature_save_dir + name + "_mean.npy", self.feature_mean)
            df_values = df_values - self.feature_mean
            self.feature_std = np.nanstd(np.absolute(df_values))
            np.save(self.feature_save_dir + name + "_std.npy", self.feature_std)
            df_values = df_values / self.feature_std
            np.save(self.feature_save_dir + name + "_vmax.npy", np.nanmax(df_values))
            np.save(self.feature_save_dir + name + "_vmin.npy", np.nanmin(df_values))
        return

    def __call__(self, df_features):
        """Normalise df_features using the statistics persisted by fit()."""
        if "date" in df_features:
            # BUGFIX: DataFrame.droplevel() takes no `inplace` argument and
            # returns a new frame -- the original `inplace=True` call raised
            # TypeError and discarded the result.  Rebind instead.
            # NOTE(review): the membership test checks *columns* while
            # droplevel targets an *index* level named "date" -- confirm the
            # expected frame layout.
            df_features = df_features.droplevel("date")
        df_values = df_features.values
        index = 0
        names = {}
        for name, dim in self.norm_groups.items():
            names[name] = slice(index, index + dim)
            index += dim
        for name, name_val in names.items():
            feature_mean = np.load(self.feature_save_dir + name + "_mean.npy")
            feature_std = np.load(self.feature_save_dir + name + "_std.npy")
            if name.endswith("volume"):
                df_values[:, name_val] = np.log1p(df_values[:, name_val])
            df_values[:, name_val] -= feature_mean
            df_values[:, name_val] /= feature_std
        df_features = pd.DataFrame(data=df_values, index=df_features.index, columns=df_features.columns)
        return df_features.fillna(0)
| 37.439024
| 114
| 0.625081
|
import os
import numpy as np
import pandas as pd
from qlib.data.dataset.processor import Processor
from qlib.data.dataset.utils import fetch_df_by_index
from typing import Dict
class HighFreqTrans(Processor):
def __init__(self, dtype: str = "bool"):
self.dtype = dtype
def fit(self, df_features):
pass
def __call__(self, df_features):
if self.dtype == "bool":
return df_features.astype(np.int8)
else:
return df_features.astype(np.float32)
class HighFreqNorm(Processor):
def __init__(
self,
fit_start_time: pd.Timestamp,
fit_end_time: pd.Timestamp,
feature_save_dir: str,
norm_groups: Dict[str, int],
):
self.fit_start_time = fit_start_time
self.fit_end_time = fit_end_time
self.feature_save_dir = feature_save_dir
self.norm_groups = norm_groups
def fit(self, df_features) -> None:
if os.path.exists(self.feature_save_dir) and len(os.listdir(self.feature_save_dir)) != 0:
return
os.makedirs(self.feature_save_dir)
fetch_df = fetch_df_by_index(df_features, slice(self.fit_start_time, self.fit_end_time), level="datetime")
del df_features
index = 0
names = {}
for name, dim in self.norm_groups.items():
names[name] = slice(index, index + dim)
index += dim
for name, name_val in names.items():
df_values = fetch_df.iloc(axis=1)[name_val].values
if name.endswith("volume"):
df_values = np.log1p(df_values)
self.feature_mean = np.nanmean(df_values)
np.save(self.feature_save_dir + name + "_mean.npy", self.feature_mean)
df_values = df_values - self.feature_mean
self.feature_std = np.nanstd(np.absolute(df_values))
np.save(self.feature_save_dir + name + "_std.npy", self.feature_std)
df_values = df_values / self.feature_std
np.save(self.feature_save_dir + name + "_vmax.npy", np.nanmax(df_values))
np.save(self.feature_save_dir + name + "_vmin.npy", np.nanmin(df_values))
return
def __call__(self, df_features):
if "date" in df_features:
df_features.droplevel("date", inplace=True)
df_values = df_features.values
index = 0
names = {}
for name, dim in self.norm_groups.items():
names[name] = slice(index, index + dim)
index += dim
for name, name_val in names.items():
feature_mean = np.load(self.feature_save_dir + name + "_mean.npy")
feature_std = np.load(self.feature_save_dir + name + "_std.npy")
if name.endswith("volume"):
df_values[:, name_val] = np.log1p(df_values[:, name_val])
df_values[:, name_val] -= feature_mean
df_values[:, name_val] /= feature_std
df_features = pd.DataFrame(data=df_values, index=df_features.index, columns=df_features.columns)
return df_features.fillna(0)
| true
| true
|
f7041fa17dca2b34640f4e235828199807afd246
| 1,487
|
py
|
Python
|
network/cnn.py
|
hgKwak/SeriesSleepNet-
|
1e90c3a0ed6244c2b876979194d7cd94056f5c8a
|
[
"MIT"
] | null | null | null |
network/cnn.py
|
hgKwak/SeriesSleepNet-
|
1e90c3a0ed6244c2b876979194d7cd94056f5c8a
|
[
"MIT"
] | null | null | null |
network/cnn.py
|
hgKwak/SeriesSleepNet-
|
1e90c3a0ed6244c2b876979194d7cd94056f5c8a
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
use_cuda = torch.cuda.is_available()
class CNNClassifier(nn.Module):
    """CNN feature extractor / classifier for sleep-stage epochs.

    Input is a 4-D tensor whose second dimension is a single "image" channel
    and whose last dimension is time; the fc sizes (365 / 553 post-conv time
    bins) are dataset-specific -- TODO confirm expected input width.
    """

    def __init__(self, channel, SHHS=False):
        super(CNNClassifier, self).__init__()
        conv1 = nn.Conv2d(1, 10, (1, 200))
        pool1 = nn.MaxPool2d((1, 2))
        if channel == 1:
            # Single-signal input: kernels stay 1 tall.
            conv2 = nn.Conv2d(10, 20, (1, 32))
            conv3 = nn.Conv2d(20, 30, (1, 128))
            conv4 = nn.Conv2d(30, 40, (1, 512))
            freq = 1
        else:
            # Multi-signal input: 2-tall kernels mix adjacent signals; each
            # of the three convs shrinks that axis by 1, hence channel - 3.
            conv2 = nn.Conv2d(10, 20, (2, 32))
            conv3 = nn.Conv2d(20, 30, (2, 128))
            conv4 = nn.Conv2d(30, 40, (2, 512))
            freq=channel-3
        pool2 = nn.MaxPool2d((1, 2))
        self.conv_module = nn.Sequential(conv1, nn.ReLU(), pool1, conv2, nn.ReLU(), conv3, nn.ReLU(), conv4, nn.ReLU(), pool2)
        if SHHS:
            # SHHS recordings yield a longer time axis after the conv stack.
            fc1 = nn.Linear(freq * 40 * 553, 100)
        else:
            fc1 = nn.Linear(freq*40*365, 100)
        fc2 = nn.Linear(100, 5)  # 5 output classes (sleep stages)
        self.fc_module = nn.Sequential(fc1, nn.ReLU(), fc2)
        if use_cuda:
            # Module-level flag set from torch.cuda.is_available().
            self.conv_module = self.conv_module.cuda()
            self.fc_module = self.fc_module.cuda()

    def forward(self, x, isfc):
        """Run the conv stack; classify when `isfc` is true, otherwise return
        the feature map reshaped to (-1, 200, 73) for a downstream sequence
        model.  NOTE(review): the 200/73 constants appear tied to the
        non-SHHS single-channel geometry -- confirm for other configs."""
        out = self.conv_module(x)
        # Flattened feature size = product of all non-batch dims.
        dim = 1
        for d in out.size()[1:]:
            dim *= d
        if isfc:
            out = out.view(-1, dim)
            out = self.fc_module(out)
        else:
            out = out.permute(0, 3, 2, 1).reshape([-1, 200, 73])
        return out
| 33.044444
| 126
| 0.511096
|
import torch
import torch.nn as nn
use_cuda = torch.cuda.is_available()
class CNNClassifier(nn.Module):
def __init__(self, channel, SHHS=False):
super(CNNClassifier, self).__init__()
conv1 = nn.Conv2d(1, 10, (1, 200))
pool1 = nn.MaxPool2d((1, 2))
if channel == 1:
conv2 = nn.Conv2d(10, 20, (1, 32))
conv3 = nn.Conv2d(20, 30, (1, 128))
conv4 = nn.Conv2d(30, 40, (1, 512))
freq = 1
else:
conv2 = nn.Conv2d(10, 20, (2, 32))
conv3 = nn.Conv2d(20, 30, (2, 128))
conv4 = nn.Conv2d(30, 40, (2, 512))
freq=channel-3
pool2 = nn.MaxPool2d((1, 2))
self.conv_module = nn.Sequential(conv1, nn.ReLU(), pool1, conv2, nn.ReLU(), conv3, nn.ReLU(), conv4, nn.ReLU(), pool2)
if SHHS:
fc1 = nn.Linear(freq * 40 * 553, 100)
else:
fc1 = nn.Linear(freq*40*365, 100)
fc2 = nn.Linear(100, 5)
self.fc_module = nn.Sequential(fc1, nn.ReLU(), fc2)
if use_cuda:
self.conv_module = self.conv_module.cuda()
self.fc_module = self.fc_module.cuda()
def forward(self, x, isfc):
out = self.conv_module(x)
dim = 1
for d in out.size()[1:]:
dim *= d
if isfc:
out = out.view(-1, dim)
out = self.fc_module(out)
else:
out = out.permute(0, 3, 2, 1).reshape([-1, 200, 73])
return out
| true
| true
|
f7042094ef12f628d0134a2a1e1460a0150617e1
| 1,017
|
py
|
Python
|
lessons/functions.py
|
Friction-Log/learning_python_frictionlog
|
6850c8873517254650c456ce78dfc5afd542fa4b
|
[
"MIT"
] | null | null | null |
lessons/functions.py
|
Friction-Log/learning_python_frictionlog
|
6850c8873517254650c456ce78dfc5afd542fa4b
|
[
"MIT"
] | null | null | null |
lessons/functions.py
|
Friction-Log/learning_python_frictionlog
|
6850c8873517254650c456ce78dfc5afd542fa4b
|
[
"MIT"
] | null | null | null |
from math import sqrt


# function with int parameter
# (annotation fixed: the original said `a: str`, contradicting both this
# comment and the call below, which passes an int)
def my_function(a: int):
    print(a)


my_function(3)


# function with type annotation (fixed to match the int argument/return)
def my_function2(a: int) -> int:
    return a


print(my_function2(3))

# import sqrt from math and use it
print(sqrt(9.4323))

# import alias from math
# from math import sqrt as square_root


# function with list parameter
def my_function3(a: list):
    for i in a:
        print(i)


my_function3([1, 2, 3, 4, 5])


# function with dictionary parameter
def my_function4(a: dict):
    for key, value in a.items():
        print(key, value)


my_function4({'a': 1, 'b': 2, 'c': 3})


# function with tuple parameter
def my_function5(a: tuple):
    for i in a:
        print(i)


my_function5(('a', 'b', 'c', 'd'))


# function with set parameter
def my_function6(a: set):
    for i in a:
        print(i)


my_function6({'a', 'b', 'c', 'd'})


# function with function parameter (`callable` works as a loose annotation;
# typing.Callable would be more precise)
def my_function7(a: callable):
    a()


# make an http request async
async def my_function8(a: callable):
    a()

# my_function8(lambda: print('hello'))
| 17.237288
| 38
| 0.687316
|
from math import sqrt
def my_function(a: str):
print(a)
my_function(3)
def my_function2(a: str) -> str:
return a
print(my_function2(3))
print(sqrt(9.4323))
def my_function3(a: list):
for i in a:
print(i)
my_function3([1, 2, 3, 4, 5])
def my_function4(a: dict):
for key, value in a.items():
print(key, value)
my_function4({'a': 1, 'b': 2, 'c': 3})
def my_function5(a: tuple):
for i in a:
print(i)
my_function5(('a', 'b', 'c', 'd'))
def my_function6(a: set):
for i in a:
print(i)
my_function6({'a', 'b', 'c', 'd'})
def my_function7(a: callable):
a()
async def my_function8(a: callable):
a()
| true
| true
|
f704217e9f7c9de573130b7171c75317e1a0a859
| 29,216
|
py
|
Python
|
cv_utils/core.py
|
WildflowerSchools/wf-cv-utils
|
647a2a46e3d6e6e14a1f813d17064cb33a3ced92
|
[
"MIT"
] | null | null | null |
cv_utils/core.py
|
WildflowerSchools/wf-cv-utils
|
647a2a46e3d6e6e14a1f813d17064cb33a3ced92
|
[
"MIT"
] | 4
|
2020-01-10T01:28:39.000Z
|
2022-01-20T03:31:11.000Z
|
cv_utils/core.py
|
WildflowerSchools/wf-cv-utils
|
647a2a46e3d6e6e14a1f813d17064cb33a3ced92
|
[
"MIT"
] | 2
|
2019-12-06T19:46:01.000Z
|
2019-12-11T22:37:43.000Z
|
import cv_datetime_utils
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
import json
import os
def compose_transformations(
        rotation_vector_1,
        translation_vector_1,
        rotation_vector_2,
        translation_vector_2):
    """Compose two rigid transforms given as Rodrigues rotation vectors plus
    translation vectors.

    Returns the (rotation_vector, translation_vector) of applying transform 1
    followed by transform 2, via OpenCV's cv.composeRT.
    """
    rotation_vector_1 = np.asarray(rotation_vector_1).reshape(3)
    translation_vector_1 = np.asarray(translation_vector_1).reshape(3)
    rotation_vector_2 = np.asarray(rotation_vector_2).reshape(3)
    translation_vector_2 = np.asarray(translation_vector_2).reshape(3)
    # composeRT also returns Jacobians; keep only rvec3 and tvec3.
    rotation_vector_composed, translation_vector_composed = cv.composeRT(
        rotation_vector_1,
        translation_vector_1,
        rotation_vector_2,
        translation_vector_2)[:2]
    rotation_vector_composed = np.squeeze(rotation_vector_composed)
    translation_vector_composed = np.squeeze(translation_vector_composed)
    return rotation_vector_composed, translation_vector_composed
def invert_transformation(
        rotation_vector,
        translation_vector):
    """Return the inverse of a rigid transform given as (rvec, tvec).

    Built by composing "translate by -t" with "rotate by -r"; applying the
    original transform and then this one yields the identity.
    """
    rotation_vector = np.asarray(rotation_vector).reshape(3)
    translation_vector = np.asarray(translation_vector).reshape(3)
    new_rotation_vector, new_translation_vector = compose_transformations(
        np.array([0.0, 0.0, 0.0]),
        -translation_vector,
        -rotation_vector,
        np.array([0.0, 0.0, 0.0]))
    new_rotation_vector = np.squeeze(new_rotation_vector)
    new_translation_vector = np.squeeze(new_translation_vector)
    return new_rotation_vector, new_translation_vector
def quaternion_vector_to_rotation_vector(quaternion_vector):
    """Convert a quaternion (w, x, y, z) to an axis-angle rotation vector.

    The result is the unit rotation axis scaled by theta = 2*atan2(|v|, w),
    where v is the quaternion's vector part.  Assumes |v| > 0.
    """
    q = np.asarray(quaternion_vector).reshape(4)
    w, v = q[0], q[1:]
    v_norm = np.linalg.norm(v)
    angle = 2 * np.arctan2(v_norm, w)
    return (angle / v_norm) * v
def quaternion_vector_to_rotation_matrix(quaternion_vector):
    """Convert a quaternion (w, x, y, z) to a 3x3 rotation matrix.

    Uses the standard direct expansion; no normalisation is performed, so
    the input is presumably a unit quaternion -- TODO confirm callers.
    """
    qw, qx, qy, qz = np.asarray(quaternion_vector).reshape(4)
    ww, xx, yy, zz = qw * qw, qx * qx, qy * qy, qz * qz
    return np.array([
        [ww + xx - yy - zz, 2 * (qx * qy - qw * qz), 2 * (qw * qy + qx * qz)],
        [2 * (qx * qy + qw * qz), ww - xx + yy - zz, 2 * (qy * qz - qw * qx)],
        [2 * (qx * qz - qw * qy), 2 * (qw * qx + qy * qz), ww - xx - yy + zz],
    ])
def rotation_vector_to_rotation_matrix(rotation_vector):
    """Convert a Rodrigues rotation vector to its 3x3 rotation matrix."""
    rotation_vector = np.asarray(rotation_vector).reshape(3)
    # cv.Rodrigues also returns a Jacobian; keep only the matrix.
    rotation_matrix = cv.Rodrigues(rotation_vector)[0]
    return rotation_matrix
def transform_object_points(
        object_points,
        rotation_vector=np.array([0.0, 0.0, 0.0]),
        translation_vector=np.array([0.0, 0.0, 0.0])):
    """Apply a rigid transform (rotate, then translate) to Nx3 points.

    Accepts anything reshapeable to (-1, 3); an empty array is returned
    unchanged.  Note np.squeeze collapses a single point to shape (3,).
    """
    object_points = np.asarray(object_points)
    rotation_vector = np.asarray(rotation_vector)
    translation_vector = np.asarray(translation_vector)
    if object_points.size == 0:
        return object_points
    object_points = object_points.reshape((-1, 3))
    rotation_vector = rotation_vector.reshape(3)
    translation_vector = translation_vector.reshape(3)
    # R @ points^T, transposed back to rows, plus broadcast translation.
    transformed_points = np.add(
        np.matmul(
            cv.Rodrigues(rotation_vector)[0],
            object_points.T).T,
        translation_vector.reshape((1, 3)))
    transformed_points = np.squeeze(transformed_points)
    return transformed_points
def generate_camera_pose(
        camera_position=np.array([0.0, 0.0, 0.0]),
        yaw=0.0,
        pitch=0.0,
        roll=0.0):
    """Build camera extrinsics (rvec, tvec) from a world position and
    yaw/pitch/roll angles, by composing four elementary transforms.

    yaw: 0.0 points north (along the positive y-axis), positive angles
        rotate counter-clockwise
    pitch: 0.0 is level with the ground, positive angles rotate upward
    roll: 0.0 is level with the ground, positive angles rotate clockwise
    All angles in radians.
    """
    camera_position = np.asarray(camera_position).reshape(3)
    # First: Move the camera to the specified position
    rotation_vector_1 = np.array([0.0, 0.0, 0.0])
    translation_vector_1 = -camera_position
    # Second: Rotate the camera so when we lower to the specified
    # inclination, it will point in the specified compass direction
    rotation_vector_2 = np.array([0.0, 0.0, -(yaw - np.pi / 2)])
    translation_vector_2 = np.array([0.0, 0.0, 0.0])
    # Third: Lower to the specified inclination
    rotation_vector_2_3 = np.array([(np.pi / 2 - pitch), 0.0, 0.0])
    translation_vector_2_3 = np.array([0.0, 0.0, 0.0])
    # Fourth: Roll the camera by the specified angle
    rotation_vector_2_3_4 = np.array([0.0, 0.0, -roll])
    translation_vector_2_3_4 = np.array([0.0, 0.0, 0.0])
    # Combine these four moves (pairwise, in order)
    rotation_vector_1_2, translation_vector_1_2 = compose_transformations(
        rotation_vector_1,
        translation_vector_1,
        rotation_vector_2,
        translation_vector_2)
    rotation_vector_1_2_3, translation_vector_1_2_3 = compose_transformations(
        rotation_vector_1_2,
        translation_vector_1_2,
        rotation_vector_2_3,
        translation_vector_2_3)
    rotation_vector, translation_vector = compose_transformations(
        rotation_vector_1_2_3,
        translation_vector_1_2_3,
        rotation_vector_2_3_4,
        translation_vector_2_3_4)
    rotation_vector = np.squeeze(rotation_vector)
    translation_vector = np.squeeze(translation_vector)
    return rotation_vector, translation_vector
def extract_camera_position(
        rotation_vector,
        translation_vector):
    """Recover the camera's world-frame position from extrinsics (rvec, tvec).

    Composes the extrinsic transform with its pure un-rotation, leaving the
    world-frame translation, which is then negated.
    """
    rotation_vector = np.asarray(rotation_vector).reshape(3)
    translation_vector = np.asarray(translation_vector).reshape(3)
    new_rotation_vector, new_translation_vector = compose_transformations(
        rotation_vector,
        translation_vector,
        -rotation_vector,
        np.array([0.0, 0.0, 0.0]))
    camera_position = -np.squeeze(new_translation_vector)
    return camera_position
def extract_camera_position_rotation_matrix(rotation_matrix, translation_vector):
    """Camera position in world coordinates, computed as R^T @ (-t)."""
    rotation = np.asarray(rotation_matrix).reshape((3, 3))
    translation = np.asarray(translation_vector).reshape(3)
    return rotation.T @ (-translation)
def extract_camera_direction(
    rotation_vector,
    translation_vector):
    """Direction of the camera's optical (+z) axis in world coordinates.

    Rotates the camera-frame z axis into the world frame using the inverse
    of the extrinsic rotation. The translation is validated but not used.
    """
    rvec = np.asarray(rotation_vector).reshape(3)
    np.asarray(translation_vector).reshape(3)  # shape check only
    optical_axis = np.matmul(
        cv.Rodrigues(-rvec)[0],
        np.array([[0.0], [0.0], [1.0]]))
    return np.squeeze(optical_axis)
def reconstruct_z_rotation(x, y):
    """Return the angle of the vector (x, y) in the range [0, 2*pi).

    Fix over the original: the previous piecewise arctan(y / x)
    construction divided by zero when x == 0. np.arctan2 produces the
    same values on the original's domain and also handles x == 0.

    Args:
        x: x component of the direction vector
        y: y component of the direction vector

    Returns:
        Counter-clockwise angle from the positive x-axis, in radians.
    """
    # arctan2 yields (-pi, pi]; the modulo shifts it into [0, 2*pi)
    return np.arctan2(y, x) % (2 * np.pi)
# Currently unused; needs to be fixed up for cases in which x and/or y are close
# to zero
def extract_yaw_from_camera_direction(
    camera_direction):
    """Yaw angle implied by a camera direction vector's x and y components."""
    direction = np.asarray(camera_direction).reshape(3)
    return reconstruct_z_rotation(direction[0], direction[1])
def generate_camera_matrix(
    focal_length,
    principal_point):
    """Build a 3x3 pinhole intrinsic matrix.

    Args:
        focal_length: (fx, fy) in pixels
        principal_point: (cx, cy) in pixels

    Returns:
        np.ndarray of shape (3, 3): [[fx, 0, cx], [0, fy, cy], [0, 0, 1]].
    """
    fx, fy = np.asarray(focal_length).reshape(2)
    cx, cy = np.asarray(principal_point).reshape(2)
    return np.array([
        [fx, 0, cx],
        [0, fy, cy],
        [0, 0, 1.0]])
def generate_projection_matrix(
    camera_matrix,
    rotation_vector,
    translation_vector):
    """Return the 3x4 projection matrix K @ [R | t]."""
    intrinsics = np.asarray(camera_matrix).reshape((3, 3))
    rvec = np.asarray(rotation_vector).reshape(3)
    tvec = np.asarray(translation_vector).reshape(3)
    extrinsics = np.concatenate(
        (cv.Rodrigues(rvec)[0], tvec.reshape((3, 1))),
        axis=1)
    return np.matmul(intrinsics, extrinsics)
def ground_grid_camera_view(
    image_width,
    image_height,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]),
    fill_image=False,
    step=0.1
):
    """Regular grid of ground-plane (z = 0) points visible to the camera.

    Delegates to ground_rectangle_camera_view() to find the visible
    ground rectangle, then samples it every `step` world units.
    """
    visible_rectangle = ground_rectangle_camera_view(
        image_width=image_width,
        image_height=image_height,
        rotation_vector=rotation_vector,
        translation_vector=translation_vector,
        camera_matrix=camera_matrix,
        distortion_coefficients=distortion_coefficients,
        fill_image=fill_image
    )
    return generate_ground_grid(grid_corners=visible_rectangle, step=step)
def ground_rectangle_camera_view(
    image_width,
    image_height,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]),
    fill_image=False
):
    """Axis-aligned ground-plane rectangle spanned by the image corners.

    Back-projects the four image corners onto the z = 0 plane. With
    fill_image=True the rectangle bounds all four hits; otherwise the two
    middle x and y values are used, giving a rectangle contained in the
    view.

    Returns:
        2x2 array [[x_min, y_min], [x_max, y_max]].
    """
    corner_pixels = np.array([
        [0.0, 0.0],
        [image_width, 0.0],
        [image_width, image_height],
        [0.0, image_height]
    ])
    plane_hits = np.empty((4, 3))
    for corner_index in range(4):
        plane_hits[corner_index] = ground_point(
            image_point=corner_pixels[corner_index],
            rotation_vector=rotation_vector,
            translation_vector=translation_vector,
            camera_matrix=camera_matrix,
            distortion_coefficients=distortion_coefficients
        )
    sorted_x = np.sort(plane_hits[:, 0])
    sorted_y = np.sort(plane_hits[:, 1])
    # Outermost hits for fill_image, innermost otherwise
    low, high = (0, 3) if fill_image else (1, 2)
    return np.array([
        [sorted_x[low], sorted_y[low]],
        [sorted_x[high], sorted_y[high]]
    ])
def ground_point(
    image_point,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0])
):
    """Back-project an image point onto the world ground plane (z = 0).

    Undistorts the pixel, lifts it to a camera-frame viewing ray, rotates
    the ray into world coordinates, and intersects it with z = 0.

    Args:
        image_point: pixel coordinates, array-like of 2 values
        rotation_vector: extrinsic Rodrigues rotation, 3 values
        translation_vector: extrinsic translation, 3 values
        camera_matrix: 3x3 intrinsic matrix
        distortion_coefficients: lens distortion (default: none)

    Returns:
        np.ndarray of shape (3,): world-frame intersection point.
        NOTE(review): if the ray is parallel to the ground plane,
        camera_direction[2] is zero and the result is inf/nan — confirm
        callers never hit that case.
    """
    image_point = np.asarray(image_point)
    rotation_vector = np.asarray(rotation_vector)
    translation_vector = np.asarray(translation_vector)
    camera_matrix = np.asarray(camera_matrix)
    distortion_coefficients = np.asarray(distortion_coefficients)
    image_point = image_point.reshape((2))
    rotation_vector = rotation_vector.reshape(3)
    translation_vector = translation_vector.reshape(3)
    camera_matrix = camera_matrix.reshape((3, 3))
    # P=camera_matrix keeps the undistorted point in pixel coordinates
    image_point_undistorted = cv.undistortPoints(
        image_point,
        camera_matrix,
        distortion_coefficients,
        P=camera_matrix
    )
    image_point_undistorted = np.squeeze(image_point_undistorted)
    # Camera center in world coordinates: R^-1 @ (-t)
    camera_position = np.matmul(
        cv.Rodrigues(-rotation_vector)[0],
        -translation_vector.T
    ).T
    # Viewing ray in the camera frame: K^-1 @ homogeneous pixel
    camera_point_homogeneous = np.matmul(
        np.linalg.inv(camera_matrix),
        np.array([image_point_undistorted[0], image_point_undistorted[1], 1.0]).T
    ).T
    # Same ray rotated into world coordinates
    camera_direction = np.matmul(
        cv.Rodrigues(-rotation_vector)[0],
        camera_point_homogeneous.T
    ).T
    # Ray parameter at which the ray crosses z = 0
    theta = -camera_position[2]/camera_direction[2]
    ground_point = camera_position + theta*camera_direction
    return ground_point
def generate_ground_grid(
    grid_corners,
    step=0.1
):
    """Regular grid of 3D points covering the given ground rectangle at z = 0.

    Args:
        grid_corners: 2x2 array [[x_min, y_min], [x_max, y_max]]
        step: grid spacing in world units (upper bounds are exclusive)

    Returns:
        np.ndarray of shape (n, 3) with z identically zero.
    """
    xs, ys = np.meshgrid(
        np.arange(grid_corners[0, 0], grid_corners[1, 0], step=step),
        np.arange(grid_corners[0, 1], grid_corners[1, 1], step=step)
    )
    stacked = np.stack((xs, ys, np.zeros_like(xs)), axis=-1)
    return stacked.reshape((-1, 3))
def project_points(
    object_points,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients,
    remove_behind_camera=False,
    remove_outside_frame=False,
    image_corners=None
):
    """Project 3D world points into pixel coordinates via cv.projectPoints.

    Optionally replaces the projections of points behind the camera and/or
    outside the image frame with [nan, nan]. `image_corners`
    ([[x_min, y_min], [x_max, y_max]]) is only consulted when
    remove_outside_frame=True.

    Returns:
        Projected pixel coordinates, squeezed — a single input point
        yields shape (2,). Empty input yields shape (0, 2).
    """
    object_points = np.asarray(object_points).reshape((-1, 3))
    rotation_vector = np.asarray(rotation_vector).reshape(3)
    translation_vector = np.asarray(translation_vector).reshape(3)
    camera_matrix = np.asarray(camera_matrix).reshape((3, 3))
    distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients))
    if object_points.size == 0:
        return np.zeros((0, 2))
    image_points = cv.projectPoints(
        object_points,
        rotation_vector,
        translation_vector,
        camera_matrix,
        distortion_coefficients
    )[0]
    if remove_behind_camera:
        # Mask out points with non-positive depth in the camera frame
        behind_camera_boolean = behind_camera(
            object_points,
            rotation_vector,
            translation_vector
        )
        image_points[behind_camera_boolean] = np.array([np.nan, np.nan])
    if remove_outside_frame:
        # Mask out points projecting outside the image bounds
        outside_frame_boolean = outside_frame(
            object_points,
            rotation_vector,
            translation_vector,
            camera_matrix,
            distortion_coefficients,
            image_corners
        )
        image_points[outside_frame_boolean] = np.array([np.nan, np.nan])
    image_points = np.squeeze(image_points)
    return image_points
def behind_camera(
    object_points,
    rotation_vector,
    translation_vector):
    """Boolean mask of world points at or behind the camera plane.

    A point is "behind" when its z coordinate in the camera frame is <= 0.
    NOTE(review): the empty-input early return has shape (0, 2), not a
    (0,) boolean mask — preserved from the original behavior.
    """
    points = np.asarray(object_points)
    rvec = np.asarray(rotation_vector)
    tvec = np.asarray(translation_vector)
    if points.size == 0:
        return np.zeros((0, 2))
    camera_frame_points = transform_object_points(
        points.reshape((-1, 3)),
        rvec.reshape(3),
        tvec.reshape(3)
    )
    return (camera_frame_points <= 0)[..., 2]
def outside_frame(
    object_points,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients,
    image_corners
):
    """Boolean mask of world points that project outside the image bounds.

    Args:
        image_corners: 2x2 array [[x_min, y_min], [x_max, y_max]] in pixels

    NOTE(review): the projection below uses zero distortion coefficients
    even though `distortion_coefficients` is accepted and validated —
    presumably the bounds test is intentionally done in undistorted image
    space; confirm against callers.

    NOTE(review): the empty-input early return has shape (0, 2), not a
    (0,) boolean mask.
    """
    object_points = np.asarray(object_points).reshape((-1, 3))
    rotation_vector = np.asarray(rotation_vector)
    translation_vector = np.asarray(translation_vector).reshape(3)
    camera_matrix = np.asarray(camera_matrix).reshape((3,3))
    distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients))
    image_corners = np.asarray(image_corners).reshape((2,2))
    if object_points.size == 0:
        return np.zeros((0, 2))
    image_points = cv.projectPoints(
        object_points,
        rotation_vector,
        translation_vector,
        camera_matrix,
        np.array([0.0, 0.0, 0.0, 0.0])
    )[0]
    image_points = image_points.reshape((-1, 2))
    outside_frame_boolean = (
        (image_points[:, 0] < image_corners[0, 0]) |
        (image_points[:, 0] > image_corners[1, 0]) |
        (image_points[:, 1] < image_corners[0, 1]) |
        (image_points[:, 1] > image_corners[1, 1])
    )
    return outside_frame_boolean
def undistort_points(
    image_points,
    camera_matrix,
    distortion_coefficients):
    """Remove lens distortion from pixel coordinates, staying in pixel units.

    Passing P=camera_matrix makes cv.undistortPoints re-project the
    normalized coordinates back through the intrinsics, so output remains
    in the same pixel space as the input.
    """
    points = np.asarray(image_points)
    intrinsics = np.asarray(camera_matrix).reshape((3, 3))
    distortion = np.asarray(distortion_coefficients)
    if points.size == 0:
        return points
    undistorted = cv.undistortPoints(
        points.reshape((-1, 1, 2)),
        intrinsics,
        distortion,
        P=intrinsics)
    return np.squeeze(undistorted)
def estimate_camera_pose_from_image_points(
        image_points_1,
        image_points_2,
        camera_matrix,
        rotation_vector_1=np.array([0.0, 0.0, 0.0]),
        translation_vector_1=np.array([0.0, 0.0, 0.0]),
        distance_between_cameras=1.0):
    """Estimate camera 2's pose from matched image points in two views.

    Computes the essential matrix from the correspondences, recovers the
    relative pose with cv.recoverPose, scales the (unit-norm) relative
    translation by `distance_between_cameras`, and composes with camera
    1's pose.

    Returns:
        Tuple (rotation_vector_2, translation_vector_2).

    Raises:
        ValueError: if either point set is empty or the sets differ in
            shape.
    """
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    camera_matrix = np.asarray(camera_matrix)
    rotation_vector_1 = np.asarray(rotation_vector_1)
    translation_vector_1 = np.asarray(translation_vector_1)
    if image_points_1.size == 0 or image_points_2.size == 0:
        raise ValueError('One or both sets of image points appear to be empty')
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    camera_matrix = camera_matrix.reshape((3, 3))
    rotation_vector_1 = rotation_vector_1.reshape(3)
    translation_vector_1 = translation_vector_1.reshape(3)
    essential_matrix, mask = cv.findEssentialMat(
        image_points_1,
        image_points_2,
        camera_matrix)
    # recoverPose returns the relative pose up to scale; mask restricts it
    # to the inliers found by findEssentialMat
    relative_rotation_matrix, relative_translation_vector = cv.recoverPose(
        essential_matrix,
        image_points_1,
        image_points_2,
        camera_matrix,
        mask=mask)[1:3]
    relative_rotation_vector = cv.Rodrigues(relative_rotation_matrix)[0]
    # Fix the metric scale of the otherwise unit-length translation
    relative_translation_vector = relative_translation_vector * distance_between_cameras
    rotation_vector_2, translation_vector_2 = compose_transformations(
        rotation_vector_1,
        translation_vector_1,
        relative_rotation_vector,
        relative_translation_vector)
    rotation_vector_2 = np.squeeze(rotation_vector_2)
    translation_vector_2 = np.squeeze(translation_vector_2)
    return rotation_vector_2, translation_vector_2
def reconstruct_object_points_from_camera_poses(
        image_points_1,
        image_points_2,
        camera_matrix,
        rotation_vector_1,
        translation_vector_1,
        rotation_vector_2,
        translation_vector_2):
    """Triangulate 3D points from two views with known camera poses.

    Builds a 3x4 projection matrix for each pose and triangulates the
    matched image points with cv.triangulatePoints.

    Returns:
        np.ndarray of triangulated 3D points (squeezed); shape (0, 3)
        when either point set is empty.

    Raises:
        ValueError: if the two point sets differ in shape.
    """
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    camera_matrix = np.asarray(camera_matrix)
    rotation_vector_1 = np.asarray(rotation_vector_1)
    translation_vector_1 = np.asarray(translation_vector_1)
    rotation_vector_2 = np.asarray(rotation_vector_2)
    translation_vector_2 = np.asarray(translation_vector_2)
    if image_points_1.size == 0 or image_points_2.size == 0:
        return np.zeros((0, 3))
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    camera_matrix = camera_matrix.reshape((3, 3))
    rotation_vector_1 = rotation_vector_1.reshape(3)
    translation_vector_1 = translation_vector_1.reshape(3)
    rotation_vector_2 = rotation_vector_2.reshape(3)
    translation_vector_2 = translation_vector_2.reshape(3)
    projection_matrix_1 = generate_projection_matrix(
        camera_matrix,
        rotation_vector_1,
        translation_vector_1)
    projection_matrix_2 = generate_projection_matrix(
        camera_matrix,
        rotation_vector_2,
        translation_vector_2)
    # triangulatePoints expects 2xN point arrays and returns 4xN
    # homogeneous coordinates
    object_points_homogeneous = cv.triangulatePoints(
        projection_matrix_1,
        projection_matrix_2,
        image_points_1.T,
        image_points_2.T)
    object_points = cv.convertPointsFromHomogeneous(
        object_points_homogeneous.T)
    object_points = np.squeeze(object_points)
    return object_points
def reconstruct_object_points_from_relative_camera_pose(
        image_points_1,
        image_points_2,
        camera_matrix,
        relative_rotation_vector,
        relative_translation_vector,
        rotation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        translation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        distance_between_cameras=1.0):
    """Triangulate 3D points given camera 1's pose and the relative pose.

    Derives camera 2's absolute pose by composing camera 1's pose with
    the relative pose (translation scaled by `distance_between_cameras`),
    then triangulates via reconstruct_object_points_from_camera_poses().

    Returns:
        Triangulated 3D points; shape (0, 3) when either point set is
        empty.

    Raises:
        ValueError: if the two point sets differ in shape.
    """
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    camera_matrix = np.asarray(camera_matrix)
    relative_rotation_vector = np.asarray(relative_rotation_vector)
    relative_translation_vector = np.asarray(relative_translation_vector)
    rotation_vector_1 = np.asarray(rotation_vector_1)
    translation_vector_1 = np.asarray(translation_vector_1)
    if image_points_1.size == 0 or image_points_2.size == 0:
        return np.zeros((0, 3))
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    camera_matrix = camera_matrix.reshape((3, 3))
    relative_rotation_vector = relative_rotation_vector.reshape(3)
    relative_translation_vector = relative_translation_vector.reshape(3)
    rotation_vector_1 = rotation_vector_1.reshape(3)
    translation_vector_1 = translation_vector_1.reshape(3)
    # Compose the relative pose onto camera 1's pose to get camera 2's
    rotation_vector_2, translation_vector_2 = cv.composeRT(
        rotation_vector_1,
        translation_vector_1,
        relative_rotation_vector,
        relative_translation_vector * distance_between_cameras)[:2]
    object_points = reconstruct_object_points_from_camera_poses(
        image_points_1,
        image_points_2,
        camera_matrix,
        rotation_vector_1,
        translation_vector_1,
        rotation_vector_2,
        translation_vector_2)
    return object_points
def reconstruct_object_points_from_image_points(
        image_points_1,
        image_points_2,
        camera_matrix,
        rotation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        translation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        distance_between_cameras=1.0):
    """Triangulate 3D points from two views, estimating the second pose first.

    Estimates camera 2's pose from the point correspondences themselves,
    then triangulates the matched points against both poses.

    Raises:
        ValueError: if the two point sets differ in shape (raised here or
            by the pose estimation, which also rejects empty inputs after
            this function's own empty-input early return).
    """
    points_1 = np.asarray(image_points_1)
    points_2 = np.asarray(image_points_2)
    intrinsics = np.asarray(camera_matrix)
    pose_1_rotation = np.asarray(rotation_vector_1)
    pose_1_translation = np.asarray(translation_vector_1)
    if points_1.size == 0 or points_2.size == 0:
        return np.zeros((0, 3))
    points_1 = points_1.reshape((-1, 2))
    points_2 = points_2.reshape((-1, 2))
    if points_1.shape != points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    intrinsics = intrinsics.reshape((3, 3))
    pose_1_rotation = pose_1_rotation.reshape(3)
    pose_1_translation = pose_1_translation.reshape(3)
    pose_2_rotation, pose_2_translation = estimate_camera_pose_from_image_points(
        points_1,
        points_2,
        intrinsics,
        pose_1_rotation,
        pose_1_translation,
        distance_between_cameras)
    return reconstruct_object_points_from_camera_poses(
        points_1,
        points_2,
        intrinsics,
        pose_1_rotation,
        pose_1_translation,
        pose_2_rotation,
        pose_2_translation)
def estimate_camera_pose_from_plane_object_points(
    input_object_points,
    height,
    origin_index,
    x_axis_index,
    y_reference_point,
    y_reference_point_sign,
    distance_calibration_indices,
    calibration_distance):
    """Fit a world frame to reconstructed points known to lie on a plane.

    Rescales the points using a known distance between two of them, finds
    (by optimization) the x/y rotation and z offset that places them on a
    horizontal plane at `height`, resolves the plane's two-fold flip
    ambiguity via a reference point, fixes the in-plane origin and x-axis
    from the indexed points, and returns the inverse of the combined
    transform as the camera pose.

    Args:
        input_object_points: reconstructed 3D points, reshaped to (-1, 3)
        height: target z value of the fitted plane
        origin_index: index of the point defining the world origin (x, y)
        x_axis_index: index of the point defining the +x direction
        y_reference_point: index of a point off the x-axis used to resolve
            the flip ambiguity
        y_reference_point_sign: expected sign of the z component of
            (x_axis - origin) x (y_reference - origin)
        distance_calibration_indices: pair of indices with known separation
        calibration_distance: metric distance between those two points

    Returns:
        Tuple (camera_rotation_vector, camera_translation_vector,
        scale_factor, object_points_4), where object_points_4 are the
        input points expressed in the fitted world frame.

    Raises:
        ValueError: if the object point array is empty.

    Fix over the original: corrected the typo in the empty-input error
    message ('Obect' -> 'Object'). All other behavior is unchanged.
    """
    input_object_points = np.asarray(input_object_points)
    if input_object_points.size == 0:
        raise ValueError('Object point array appears to be empty')
    input_object_points = input_object_points.reshape((-1, 3))
    # Rescale so the calibration pair sits at the known metric distance
    scale_factor = np.divide(
        calibration_distance,
        np.linalg.norm(
            np.subtract(
                input_object_points[distance_calibration_indices[0]],
                input_object_points[distance_calibration_indices[1]])))
    object_points_1 = np.multiply(
        input_object_points,
        scale_factor)
    def objective_function(parameters):
        # Sum of squared z-deviations from the target plane height
        rotation_x = parameters[0]
        rotation_y = parameters[1]
        translation_z = parameters[2]
        object_points_transformed = transform_object_points(
            object_points_1,
            np.array([rotation_x, rotation_y, 0.0]),
            np.array([0.0, 0.0, translation_z]))
        return np.sum(np.square(object_points_transformed[:, 2] - height))
    optimization_solution = scipy.optimize.minimize(
        objective_function,
        np.array([0.0, 0.0, 0.0]))
    rotation_x_a = optimization_solution['x'][0]
    rotation_y_a = optimization_solution['x'][1]
    translation_z_a = optimization_solution['x'][2]
    # Candidate (b) is the same plane fit rotated by an extra pi about the
    # same in-plane axis (plane flipped), with the z offset negated
    rotation_x_rotation_y_a_norm = np.linalg.norm([rotation_x_a, rotation_y_a])
    rotation_x_b = rotation_x_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm)
    rotation_y_b = rotation_y_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm)
    translation_z_b = - translation_z_a
    rotation_vector_2_a = np.array([rotation_x_a, rotation_y_a, 0.0])
    translation_vector_2_a = np.array([0.0, 0.0, translation_z_a])
    object_points_2_a = transform_object_points(
        object_points_1,
        rotation_vector_2_a,
        translation_vector_2_a)
    rotation_vector_2_b = np.array([rotation_x_b, rotation_y_b, 0.0])
    translation_vector_2_b = np.array([0.0, 0.0, translation_z_b])
    object_points_2_b = transform_object_points(
        object_points_1,
        rotation_vector_2_b,
        translation_vector_2_b)
    # Choose the candidate whose in-plane orientation matches the expected
    # sign of (x_axis - origin) x (y_reference - origin)
    sign_a = np.sign(
        np.cross(
            np.subtract(
                object_points_2_a[x_axis_index],
                object_points_2_a[origin_index]),
            np.subtract(
                object_points_2_a[y_reference_point],
                object_points_2_a[origin_index]))[2])
    sign_b = np.sign(
        np.cross(
            np.subtract(
                object_points_2_b[x_axis_index],
                object_points_2_b[origin_index]),
            np.subtract(
                object_points_2_b[y_reference_point],
                object_points_2_b[origin_index]))[2])
    if sign_a == y_reference_point_sign:
        rotation_vector_2 = rotation_vector_2_a
        translation_vector_2 = translation_vector_2_a
        object_points_2 = object_points_2_a
    else:
        rotation_vector_2 = rotation_vector_2_b
        translation_vector_2 = translation_vector_2_b
        object_points_2 = object_points_2_b
    # Shift the chosen origin point to (0, 0) in the plane
    xy_shift = - object_points_2[origin_index, :2]
    rotation_vector_3 = np.array([0.0, 0.0, 0.0])
    translation_vector_3 = np.array([xy_shift[0], xy_shift[1], 0.0])
    object_points_3 = transform_object_points(
        object_points_2,
        rotation_vector_3,
        translation_vector_3)
    # Rotate about z so the x-axis point lies on the +x axis
    final_z_rotation = - reconstruct_z_rotation(
        object_points_3[x_axis_index, 0],
        object_points_3[x_axis_index, 1])
    rotation_vector_4 = np.array([0.0, 0.0, final_z_rotation])
    translation_vector_4 = np.array([0.0, 0.0, 0.0])
    object_points_4 = transform_object_points(
        object_points_3,
        rotation_vector_4,
        translation_vector_4)
    rotation_vector_2_3, translation_vector_2_3 = compose_transformations(
        rotation_vector_2,
        translation_vector_2,
        rotation_vector_3,
        translation_vector_3)
    rotation_vector_2_3_4, translation_vector_2_3_4 = compose_transformations(
        rotation_vector_2_3,
        translation_vector_2_3,
        rotation_vector_4,
        translation_vector_4)
    # The camera pose is the inverse of the object-points transform
    camera_rotation_vector, camera_translation_vector = invert_transformation(
        rotation_vector_2_3_4,
        translation_vector_2_3_4)
    return camera_rotation_vector, camera_translation_vector, scale_factor, object_points_4
def estimate_camera_poses_from_plane_image_points(
    image_points_1,
    image_points_2,
    camera_matrix,
    height,
    origin_index,
    x_axis_index,
    y_reference_point,
    y_reference_point_sign,
    distance_calibration_indices,
    calibration_distance):
    """Estimate both camera poses from matched image points on a plane.

    Estimates the relative pose and a scale-free reconstruction from the
    correspondences, fits a world frame to the (planar) reconstructed
    points to obtain camera 1's pose and the metric scale, then composes
    the scaled relative pose to obtain camera 2's pose.

    Returns:
        Tuple (rotation_vector_1, translation_vector_1,
        rotation_vector_2, translation_vector_2).

    Raises:
        ValueError: if either point set is empty or the sets differ in
            shape.
    """
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    camera_matrix = np.asarray(camera_matrix)
    if image_points_1.size == 0 or image_points_2.size == 0:
        raise ValueError('One or both sets of image points appear to be empty')
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    camera_matrix = camera_matrix.reshape((3, 3))
    relative_rotation_vector, relative_translation_vector = estimate_camera_pose_from_image_points(
        image_points_1,
        image_points_2,
        camera_matrix)
    input_object_points = reconstruct_object_points_from_image_points(
        image_points_1,
        image_points_2,
        camera_matrix)
    # Plane fit yields camera 1's pose and the metric scale factor
    rotation_vector_1, translation_vector_1, scale_factor = estimate_camera_pose_from_plane_object_points(
        input_object_points,
        height,
        origin_index,
        x_axis_index,
        y_reference_point,
        y_reference_point_sign,
        distance_calibration_indices,
        calibration_distance)[:3]
    rotation_vector_2, translation_vector_2 = compose_transformations(
        rotation_vector_1,
        translation_vector_1,
        relative_rotation_vector,
        relative_translation_vector * scale_factor)
    return rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2
# (removed non-code extraction residue: dataset statistics columns)
import cv_datetime_utils
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
import json
import os
def compose_transformations(
    rotation_vector_1,
    translation_vector_1,
    rotation_vector_2,
    translation_vector_2):
    """Compose two rigid transforms given as Rodrigues rotation + translation.

    Transform 1 is applied first, then transform 2, via cv.composeRT.

    Returns:
        Tuple (rotation_vector, translation_vector) of the combined
        transform, each squeezed to shape (3,).
    """
    rotation_vector_1 = np.asarray(rotation_vector_1).reshape(3)
    translation_vector_1 = np.asarray(translation_vector_1).reshape(3)
    rotation_vector_2 = np.asarray(rotation_vector_2).reshape(3)
    translation_vector_2 = np.asarray(translation_vector_2).reshape(3)
    rotation_vector_composed, translation_vector_composed = cv.composeRT(
        rotation_vector_1,
        translation_vector_1,
        rotation_vector_2,
        translation_vector_2)[:2]
    rotation_vector_composed = np.squeeze(rotation_vector_composed)
    translation_vector_composed = np.squeeze(translation_vector_composed)
    return rotation_vector_composed, translation_vector_composed
def invert_transformation(
    rotation_vector,
    translation_vector):
    """Return the inverse of a rigid transform.

    Composes a pure translation by -t with a pure rotation by -r, which
    yields the transform that undoes (r, t).
    """
    forward_rotation = np.asarray(rotation_vector).reshape(3)
    forward_translation = np.asarray(translation_vector).reshape(3)
    inverse_rotation, inverse_translation = compose_transformations(
        np.array([0.0, 0.0, 0.0]),
        -forward_translation,
        -forward_rotation,
        np.array([0.0, 0.0, 0.0]))
    return np.squeeze(inverse_rotation), np.squeeze(inverse_translation)
def quaternion_vector_to_rotation_vector(quaternion_vector):
    """Convert a quaternion (w, x, y, z) to an axis-angle rotation vector.

    The rotation vector's direction is the rotation axis and its norm is
    the angle theta = 2 * atan2(|v|, w), where v = (x, y, z).

    Fix over the original: an identity (or numerically-identity)
    quaternion has |v| == 0 and previously produced NaNs from the 0/0
    axis normalization; it now returns the zero rotation vector.

    Args:
        quaternion_vector: array-like of 4 values, scalar part first.

    Returns:
        np.ndarray of shape (3,): the equivalent Rodrigues rotation vector.
    """
    quaternion_vector = np.asarray(quaternion_vector).reshape(4)
    spatial_vector = quaternion_vector[1:]
    qw = quaternion_vector[0]
    spatial_vector_length = np.linalg.norm(spatial_vector)
    if np.isclose(spatial_vector_length, 0.0):
        # Zero rotation: the axis is undefined but the vector is just zero
        return np.zeros(3)
    unit_vector = spatial_vector / spatial_vector_length
    theta = 2 * np.arctan2(spatial_vector_length, qw)
    return theta * unit_vector
def quaternion_vector_to_rotation_matrix(quaternion_vector):
    """3x3 rotation matrix for a quaternion given as (w, x, y, z).

    Uses the standard homogeneous expansion; the input is expected to be
    a unit quaternion for the result to be a proper rotation.
    """
    qw, qx, qy, qz = np.asarray(quaternion_vector).reshape(4)
    ww, xx, yy, zz = qw * qw, qx * qx, qy * qy, qz * qz
    return np.array([
        [ww + xx - yy - zz, 2 * (qx * qy - qw * qz), 2 * (qw * qy + qx * qz)],
        [2 * (qx * qy + qw * qz), ww - xx + yy - zz, 2 * (qy * qz - qw * qx)],
        [2 * (qx * qz - qw * qy), 2 * (qw * qx + qy * qz), ww - xx - yy + zz]
    ])
def rotation_vector_to_rotation_matrix(rotation_vector):
    """Rodrigues rotation vector -> 3x3 rotation matrix."""
    flat_vector = np.asarray(rotation_vector).reshape(3)
    return cv.Rodrigues(flat_vector)[0]
def transform_object_points(
    object_points,
    rotation_vector=np.array([0.0, 0.0, 0.0]),
    translation_vector=np.array([0.0, 0.0, 0.0])):
    """Apply a rigid transform (rotate, then translate) to 3D points.

    Args:
        object_points: 3D points, reshaped to (-1, 3)
        rotation_vector: Rodrigues rotation, 3 values (default: none)
        translation_vector: translation, 3 values (default: none)

    Returns:
        Transformed points, squeezed — a single input point yields shape
        (3,). Empty input is returned unchanged.
    """
    object_points = np.asarray(object_points)
    rotation_vector = np.asarray(rotation_vector)
    translation_vector = np.asarray(translation_vector)
    if object_points.size == 0:
        return object_points
    object_points = object_points.reshape((-1, 3))
    rotation_vector = rotation_vector.reshape(3)
    translation_vector = translation_vector.reshape(3)
    # Points are rows, so rotate via R @ P^T, transpose back, then shift
    transformed_points = np.add(
        np.matmul(
            cv.Rodrigues(rotation_vector)[0],
            object_points.T).T,
        translation_vector.reshape((1, 3)))
    transformed_points = np.squeeze(transformed_points)
    return transformed_points
def generate_camera_pose(
        camera_position=np.array([0.0, 0.0, 0.0]),
        yaw=0.0,
        pitch=0.0,
        roll=0.0):
    """Build an extrinsic pose from a world position and yaw/pitch/roll.

    yaw: 0.0 points north (along the positive y-axis); positive angles
    rotate counter-clockwise. pitch: 0.0 is level with the ground;
    positive angles rotate upward. roll: 0.0 is level with the ground;
    positive angles rotate clockwise. All angles in radians.

    Returns:
        Tuple (rotation_vector, translation_vector), each shape (3,).
    """
    camera_position = np.asarray(camera_position).reshape(3)
    # First: move the camera to the specified position
    rotation_vector_1 = np.array([0.0, 0.0, 0.0])
    translation_vector_1 = -camera_position
    # Second: rotate so that, once lowered to the inclination, the camera
    # points in the specified compass direction
    rotation_vector_2 = np.array([0.0, 0.0, -(yaw - np.pi / 2)])
    translation_vector_2 = np.array([0.0, 0.0, 0.0])
    # Third: lower to the specified inclination
    rotation_vector_2_3 = np.array([(np.pi / 2 - pitch), 0.0, 0.0])
    translation_vector_2_3 = np.array([0.0, 0.0, 0.0])
    # Fourth: roll the camera by the specified angle
    rotation_vector_2_3_4 = np.array([0.0, 0.0, -roll])
    translation_vector_2_3_4 = np.array([0.0, 0.0, 0.0])
    # Combine the four moves into a single transform
    rotation_vector_1_2, translation_vector_1_2 = compose_transformations(
        rotation_vector_1,
        translation_vector_1,
        rotation_vector_2,
        translation_vector_2)
    rotation_vector_1_2_3, translation_vector_1_2_3 = compose_transformations(
        rotation_vector_1_2,
        translation_vector_1_2,
        rotation_vector_2_3,
        translation_vector_2_3)
    rotation_vector, translation_vector = compose_transformations(
        rotation_vector_1_2_3,
        translation_vector_1_2_3,
        rotation_vector_2_3_4,
        translation_vector_2_3_4)
    rotation_vector = np.squeeze(rotation_vector)
    translation_vector = np.squeeze(translation_vector)
    return rotation_vector, translation_vector
def extract_camera_position(
    rotation_vector,
    translation_vector):
    """Recover the camera's world-frame position from its extrinsic pose.

    Composing the pose with its inverse rotation leaves only the
    translation component expressed in world coordinates; its negation is
    the camera center.
    """
    rotation_vector = np.asarray(rotation_vector).reshape(3)
    translation_vector = np.asarray(translation_vector).reshape(3)
    new_rotation_vector, new_translation_vector = compose_transformations(
        rotation_vector,
        translation_vector,
        -rotation_vector,
        np.array([0.0, 0.0, 0.0]))
    camera_position = -np.squeeze(new_translation_vector)
    return camera_position
def extract_camera_position_rotation_matrix(rotation_matrix, translation_vector):
    """Camera position in world coordinates, computed as R^T @ (-t)."""
    rotation_matrix = np.asarray(rotation_matrix).reshape((3,3))
    translation_vector = np.asarray(translation_vector).reshape(3)
    position = np.matmul(rotation_matrix.T, -translation_vector.T)
    return position
def extract_camera_direction(
    rotation_vector,
    translation_vector):
    """Direction of the camera's optical (+z) axis in world coordinates.

    The translation is validated for shape but does not affect the result.
    """
    rotation_vector = np.asarray(rotation_vector).reshape(3)
    translation_vector = np.asarray(translation_vector).reshape(3)
    # Rotate the camera-frame z axis into the world frame
    camera_direction = np.matmul(
        cv.Rodrigues(-rotation_vector)[0],
        np.array([[0.0], [0.0], [1.0]]))
    camera_direction = np.squeeze(camera_direction)
    return camera_direction
def reconstruct_z_rotation(x, y):
    """Return the angle of the vector (x, y) in the range [0, 2*pi).

    Fix over the original: the previous piecewise arctan(y / x)
    construction divided by zero when x == 0. np.arctan2 produces the
    same values on the original's domain and also handles x == 0.

    Args:
        x: x component of the direction vector
        y: y component of the direction vector

    Returns:
        Counter-clockwise angle from the positive x-axis, in radians.
    """
    # arctan2 yields (-pi, pi]; the modulo shifts it into [0, 2*pi)
    return np.arctan2(y, x) % (2 * np.pi)
def extract_yaw_from_camera_direction(
    camera_direction):
    """Yaw angle implied by a camera direction vector's x and y components."""
    camera_direction = np.asarray(camera_direction).reshape(3)
    yaw = reconstruct_z_rotation(
        camera_direction[0],
        camera_direction[1])
    return yaw
def generate_camera_matrix(
    focal_length,
    principal_point):
    """Build a 3x3 pinhole intrinsic matrix.

    Args:
        focal_length: (fx, fy) in pixels
        principal_point: (cx, cy) in pixels
    """
    focal_length = np.asarray(focal_length).reshape(2)
    principal_point = np.asarray(principal_point).reshape(2)
    camera_matrix = np.array([
        [focal_length[0], 0, principal_point[0]],
        [0, focal_length[1], principal_point[1]],
        [0, 0, 1.0]])
    return camera_matrix
def generate_projection_matrix(
    camera_matrix,
    rotation_vector,
    translation_vector):
    """Return the 3x4 projection matrix K @ [R | t]."""
    camera_matrix = np.asarray(camera_matrix).reshape((3, 3))
    rotation_vector = np.asarray(rotation_vector).reshape(3)
    translation_vector = np.asarray(translation_vector).reshape(3)
    projection_matrix = np.matmul(
        camera_matrix,
        np.concatenate((
            cv.Rodrigues(rotation_vector)[0],
            translation_vector.reshape((3, 1))),
            axis=1))
    return(projection_matrix)
def ground_grid_camera_view(
    image_width,
    image_height,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]),
    fill_image=False,
    step=0.1
):
    """Regular grid of ground-plane (z = 0) points visible to the camera.

    Finds the visible ground rectangle via ground_rectangle_camera_view(),
    then samples it every `step` world units.
    """
    grid_corners = ground_rectangle_camera_view(
        image_width=image_width,
        image_height=image_height,
        rotation_vector=rotation_vector,
        translation_vector=translation_vector,
        camera_matrix=camera_matrix,
        distortion_coefficients=distortion_coefficients,
        fill_image=fill_image
    )
    grid_points = generate_ground_grid(
        grid_corners=grid_corners,
        step=step
    )
    return grid_points
def ground_rectangle_camera_view(
    image_width,
    image_height,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]),
    fill_image=False
):
    """Axis-aligned ground-plane rectangle spanned by the image corners.

    Back-projects the four image corners onto the z = 0 plane. With
    fill_image=True the rectangle bounds all four hits; otherwise the
    two middle x and y values are used, giving a rectangle contained in
    the view.

    Returns:
        2x2 array [[x_min, y_min], [x_max, y_max]].
    """
    image_points = np.array([
        [0.0, 0.0],
        [image_width, 0.0],
        [image_width, image_height],
        [0.0, image_height]
    ])
    ground_points=np.empty((4, 3))
    for i in range(4):
        ground_points[i] = ground_point(
            image_point=image_points[i],
            rotation_vector=rotation_vector,
            translation_vector=translation_vector,
            camera_matrix=camera_matrix,
            distortion_coefficients=distortion_coefficients
        )
    x_values_sorted = np.sort(ground_points[:, 0])
    y_values_sorted = np.sort(ground_points[:, 1])
    # fill_image selects the outermost hits; otherwise the innermost
    if fill_image:
        x_min = x_values_sorted[0]
        x_max = x_values_sorted[3]
        y_min = y_values_sorted[0]
        y_max = y_values_sorted[3]
    else:
        x_min = x_values_sorted[1]
        x_max = x_values_sorted[2]
        y_min = y_values_sorted[1]
        y_max = y_values_sorted[2]
    return np.array([
        [x_min, y_min],
        [x_max, y_max]
    ])
def ground_point(
    image_point,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0])
):
    """Back-project an image point onto the world ground plane (z = 0).

    Undistorts the pixel, lifts it to a camera-frame viewing ray, rotates
    the ray into world coordinates, and intersects it with z = 0.

    Returns:
        np.ndarray of shape (3,): world-frame intersection point.
        NOTE(review): if the ray is parallel to the ground plane,
        camera_direction[2] is zero and the result is inf/nan — confirm
        callers never hit that case.
    """
    image_point = np.asarray(image_point)
    rotation_vector = np.asarray(rotation_vector)
    translation_vector = np.asarray(translation_vector)
    camera_matrix = np.asarray(camera_matrix)
    distortion_coefficients = np.asarray(distortion_coefficients)
    image_point = image_point.reshape((2))
    rotation_vector = rotation_vector.reshape(3)
    translation_vector = translation_vector.reshape(3)
    camera_matrix = camera_matrix.reshape((3, 3))
    # P=camera_matrix keeps the undistorted point in pixel coordinates
    image_point_undistorted = cv.undistortPoints(
        image_point,
        camera_matrix,
        distortion_coefficients,
        P=camera_matrix
    )
    image_point_undistorted = np.squeeze(image_point_undistorted)
    # Camera center in world coordinates: R^-1 @ (-t)
    camera_position = np.matmul(
        cv.Rodrigues(-rotation_vector)[0],
        -translation_vector.T
    ).T
    # Viewing ray in the camera frame: K^-1 @ homogeneous pixel
    camera_point_homogeneous = np.matmul(
        np.linalg.inv(camera_matrix),
        np.array([image_point_undistorted[0], image_point_undistorted[1], 1.0]).T
    ).T
    # Same ray rotated into world coordinates
    camera_direction = np.matmul(
        cv.Rodrigues(-rotation_vector)[0],
        camera_point_homogeneous.T
    ).T
    # Ray parameter at which the ray crosses z = 0
    theta = -camera_position[2]/camera_direction[2]
    ground_point = camera_position + theta*camera_direction
    return ground_point
def generate_ground_grid(
    grid_corners,
    step=0.1
):
    """Sample the rectangle [[x_min, y_min], [x_max, y_max]] on z = 0.

    Upper bounds are exclusive; spacing is `step` world units.

    Returns:
        np.ndarray of shape (n, 3) with z identically zero.
    """
    x_samples = np.arange(grid_corners[0, 0], grid_corners[1, 0], step=step)
    y_samples = np.arange(grid_corners[0, 1], grid_corners[1, 1], step=step)
    x_mesh, y_mesh = np.meshgrid(x_samples, y_samples)
    return np.column_stack((
        x_mesh.ravel(),
        y_mesh.ravel(),
        np.zeros(x_mesh.size)))
def project_points(
    object_points,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients,
    remove_behind_camera=False,
    remove_outside_frame=False,
    image_corners=None
):
    """Project 3D world points into pixel coordinates via cv.projectPoints.

    Optionally replaces the projections of points behind the camera and/or
    outside the image frame with [nan, nan]. `image_corners`
    ([[x_min, y_min], [x_max, y_max]]) is only consulted when
    remove_outside_frame=True.

    Returns:
        Projected pixel coordinates, squeezed — a single input point
        yields shape (2,). Empty input yields shape (0, 2).
    """
    object_points = np.asarray(object_points).reshape((-1, 3))
    rotation_vector = np.asarray(rotation_vector).reshape(3)
    translation_vector = np.asarray(translation_vector).reshape(3)
    camera_matrix = np.asarray(camera_matrix).reshape((3, 3))
    distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients))
    if object_points.size == 0:
        return np.zeros((0, 2))
    image_points = cv.projectPoints(
        object_points,
        rotation_vector,
        translation_vector,
        camera_matrix,
        distortion_coefficients
    )[0]
    if remove_behind_camera:
        # Mask out points with non-positive depth in the camera frame
        behind_camera_boolean = behind_camera(
            object_points,
            rotation_vector,
            translation_vector
        )
        image_points[behind_camera_boolean] = np.array([np.nan, np.nan])
    if remove_outside_frame:
        # Mask out points projecting outside the image bounds
        outside_frame_boolean = outside_frame(
            object_points,
            rotation_vector,
            translation_vector,
            camera_matrix,
            distortion_coefficients,
            image_corners
        )
        image_points[outside_frame_boolean] = np.array([np.nan, np.nan])
    image_points = np.squeeze(image_points)
    return image_points
def behind_camera(
    object_points,
    rotation_vector,
    translation_vector):
    """Boolean mask of world points at or behind the camera plane.

    A point is "behind" when its z coordinate in the camera frame is <= 0.
    NOTE(review): the empty-input early return has shape (0, 2), not a
    (0,) boolean mask.
    """
    object_points = np.asarray(object_points)
    rotation_vector = np.asarray(rotation_vector)
    translation_vector = np.asarray(translation_vector)
    if object_points.size == 0:
        return np.zeros((0, 2))
    object_points = object_points.reshape((-1, 3))
    rotation_vector = rotation_vector.reshape(3)
    translation_vector = translation_vector.reshape(3)
    object_points_transformed = transform_object_points(
        object_points,
        rotation_vector,
        translation_vector
    )
    # z <= 0 in the camera frame means the point cannot be imaged
    behind_camera_boolean = (object_points_transformed <= 0)[..., 2]
    return behind_camera_boolean
def outside_frame(
    object_points,
    rotation_vector,
    translation_vector,
    camera_matrix,
    distortion_coefficients,
    image_corners
):
    """Boolean mask of world points that project outside the image bounds.

    Args:
        image_corners: 2x2 array [[x_min, y_min], [x_max, y_max]] in pixels

    NOTE(review): the projection below uses zero distortion coefficients
    even though `distortion_coefficients` is accepted and validated —
    presumably the bounds test is intentionally done in undistorted image
    space; confirm against callers.

    NOTE(review): the empty-input early return has shape (0, 2), not a
    (0,) boolean mask.
    """
    object_points = np.asarray(object_points).reshape((-1, 3))
    rotation_vector = np.asarray(rotation_vector)
    translation_vector = np.asarray(translation_vector).reshape(3)
    camera_matrix = np.asarray(camera_matrix).reshape((3,3))
    distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients))
    image_corners = np.asarray(image_corners).reshape((2,2))
    if object_points.size == 0:
        return np.zeros((0, 2))
    image_points = cv.projectPoints(
        object_points,
        rotation_vector,
        translation_vector,
        camera_matrix,
        np.array([0.0, 0.0, 0.0, 0.0])
    )[0]
    image_points = image_points.reshape((-1, 2))
    outside_frame_boolean = (
        (image_points[:, 0] < image_corners[0, 0]) |
        (image_points[:, 0] > image_corners[1, 0]) |
        (image_points[:, 1] < image_corners[0, 1]) |
        (image_points[:, 1] > image_corners[1, 1])
    )
    return outside_frame_boolean
def undistort_points(
        image_points,
        camera_matrix,
        distortion_coefficients):
    """Remove lens distortion from 2D image points.

    Wraps cv.undistortPoints with P=camera_matrix so the corrected points
    stay in pixel coordinates rather than normalized camera coordinates.
    """
    points = np.asarray(image_points)
    intrinsics = np.asarray(camera_matrix)
    distortion = np.asarray(distortion_coefficients)
    # Nothing to undistort: hand the (empty) array straight back.
    if points.size == 0:
        return points
    intrinsics = intrinsics.reshape((3, 3))
    corrected = cv.undistortPoints(
        points.reshape((-1, 1, 2)),
        intrinsics,
        distortion,
        P=intrinsics)
    return np.squeeze(corrected)
def estimate_camera_pose_from_image_points(
        image_points_1,
        image_points_2,
        camera_matrix,
        rotation_vector_1=np.array([0.0, 0.0, 0.0]),
        translation_vector_1=np.array([0.0, 0.0, 0.0]),
        distance_between_cameras=1.0):
    """Estimate the second camera's pose from matched image points.

    Recovers the relative pose via the essential matrix, scales the
    (unit-length) relative translation by distance_between_cameras, and
    composes the result with the known pose of the first camera.

    Raises:
        ValueError: if either point set is empty or the sets differ in shape.
    """
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    if image_points_1.size == 0 or image_points_2.size == 0:
        raise ValueError('One or both sets of image points appear to be empty')
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    camera_matrix = np.asarray(camera_matrix).reshape((3, 3))
    rotation_vector_1 = np.asarray(rotation_vector_1).reshape(3)
    translation_vector_1 = np.asarray(translation_vector_1).reshape(3)
    essential_matrix, inlier_mask = cv.findEssentialMat(
        image_points_1,
        image_points_2,
        camera_matrix)
    recovery = cv.recoverPose(
        essential_matrix,
        image_points_1,
        image_points_2,
        camera_matrix,
        mask=inlier_mask)
    relative_rotation_matrix, relative_translation_vector = recovery[1:3]
    relative_rotation_vector = cv.Rodrigues(relative_rotation_matrix)[0]
    # recoverPose yields a unit-length translation; apply the known baseline.
    scaled_translation = relative_translation_vector * distance_between_cameras
    rotation_vector_2, translation_vector_2 = compose_transformations(
        rotation_vector_1,
        translation_vector_1,
        relative_rotation_vector,
        scaled_translation)
    return np.squeeze(rotation_vector_2), np.squeeze(translation_vector_2)
def reconstruct_object_points_from_camera_poses(
        image_points_1,
        image_points_2,
        camera_matrix,
        rotation_vector_1,
        translation_vector_1,
        rotation_vector_2,
        translation_vector_2):
    """Triangulate 3D object points from two calibrated camera views.

    Builds a projection matrix for each pose, triangulates the matched 2D
    points, and converts the homogeneous result back to 3D.

    Returns:
        Array of shape (N, 3); an empty (0, 3) array when no points are given.

    Raises:
        ValueError: if the two point sets differ in shape.
    """
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    if image_points_1.size == 0 or image_points_2.size == 0:
        return np.zeros((0, 3))
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    camera_matrix = np.asarray(camera_matrix).reshape((3, 3))
    rotation_1 = np.asarray(rotation_vector_1).reshape(3)
    translation_1 = np.asarray(translation_vector_1).reshape(3)
    rotation_2 = np.asarray(rotation_vector_2).reshape(3)
    translation_2 = np.asarray(translation_vector_2).reshape(3)
    projection_1 = generate_projection_matrix(
        camera_matrix, rotation_1, translation_1)
    projection_2 = generate_projection_matrix(
        camera_matrix, rotation_2, translation_2)
    homogeneous_points = cv.triangulatePoints(
        projection_1,
        projection_2,
        image_points_1.T,
        image_points_2.T)
    object_points = cv.convertPointsFromHomogeneous(homogeneous_points.T)
    return np.squeeze(object_points)
def reconstruct_object_points_from_relative_camera_pose(
        image_points_1,
        image_points_2,
        camera_matrix,
        relative_rotation_vector,
        relative_translation_vector,
        rotation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        translation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        distance_between_cameras=1.0):
    """Triangulate object points given the relative pose between two cameras.

    Composes the first camera's pose with the relative pose (translation
    scaled by distance_between_cameras) to obtain the second pose, then
    triangulates via reconstruct_object_points_from_camera_poses.

    Returns:
        Array of shape (N, 3); an empty (0, 3) array when no points are given.

    Raises:
        ValueError: if the two point sets differ in shape.
    """
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    if image_points_1.size == 0 or image_points_2.size == 0:
        return np.zeros((0, 3))
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    camera_matrix = np.asarray(camera_matrix).reshape((3, 3))
    relative_rotation = np.asarray(relative_rotation_vector).reshape(3)
    relative_translation = np.asarray(relative_translation_vector).reshape(3)
    rotation_1 = np.asarray(rotation_vector_1).reshape(3)
    translation_1 = np.asarray(translation_vector_1).reshape(3)
    # Scale the relative translation by the known baseline before composing.
    rotation_2, translation_2 = cv.composeRT(
        rotation_1,
        translation_1,
        relative_rotation,
        relative_translation * distance_between_cameras)[:2]
    return reconstruct_object_points_from_camera_poses(
        image_points_1,
        image_points_2,
        camera_matrix,
        rotation_1,
        translation_1,
        rotation_2,
        translation_2)
def reconstruct_object_points_from_image_points(
        image_points_1,
        image_points_2,
        camera_matrix,
        rotation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        translation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        distance_between_cameras=1.0):
    """Triangulate object points from two views with unknown relative pose.

    First estimates the second camera's pose from the correspondences, then
    triangulates against both poses.

    Returns:
        Array of shape (N, 3); an empty (0, 3) array when no points are given.

    Raises:
        ValueError: if the two point sets differ in shape.
    """
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    if image_points_1.size == 0 or image_points_2.size == 0:
        return np.zeros((0, 3))
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    camera_matrix = np.asarray(camera_matrix).reshape((3, 3))
    pose_1_rotation = np.asarray(rotation_vector_1).reshape(3)
    pose_1_translation = np.asarray(translation_vector_1).reshape(3)
    pose_2_rotation, pose_2_translation = estimate_camera_pose_from_image_points(
        image_points_1,
        image_points_2,
        camera_matrix,
        pose_1_rotation,
        pose_1_translation,
        distance_between_cameras)
    return reconstruct_object_points_from_camera_poses(
        image_points_1,
        image_points_2,
        camera_matrix,
        pose_1_rotation,
        pose_1_translation,
        pose_2_rotation,
        pose_2_translation)
def estimate_camera_pose_from_plane_object_points(
        input_object_points,
        height,
        origin_index,
        x_axis_index,
        y_reference_point,
        y_reference_point_sign,
        distance_calibration_indices,
        calibration_distance):
    """Anchor a camera pose using reconstructed points known to lie on a plane.

    The unscaled reconstruction is rescaled so the two points named by
    distance_calibration_indices are calibration_distance apart, then a small
    optimization finds the x/y rotation and z translation placing all points
    at the given plane height. Of the two mirror-symmetric solutions, the one
    matching y_reference_point_sign (handedness of the x-axis / y-reference
    cross product) is kept; the frame is finally shifted so origin_index sits
    at the x/y origin and rotated so x_axis_index lies on the +x axis.

    Relies on the external helpers transform_object_points,
    reconstruct_z_rotation, compose_transformations and invert_transformation
    (defined elsewhere in this module).

    Returns:
        (camera_rotation_vector, camera_translation_vector, scale_factor,
        aligned object points).

    Raises:
        ValueError: if input_object_points is empty.
    """
    input_object_points = np.asarray(input_object_points)
    if input_object_points.size == 0:
        # Typo fix: "Obect" -> "Object".
        raise ValueError('Object point array appears to be empty')
    input_object_points = input_object_points.reshape((-1, 3))
    # Rescale so the two calibration points are the known distance apart.
    scale_factor = np.divide(
        calibration_distance,
        np.linalg.norm(
            np.subtract(
                input_object_points[distance_calibration_indices[0]],
                input_object_points[distance_calibration_indices[1]])))
    object_points_1 = np.multiply(
        input_object_points,
        scale_factor)
    def objective_function(parameters):
        # Penalty: squared deviation of the transformed z values from height.
        rotation_x = parameters[0]
        rotation_y = parameters[1]
        translation_z = parameters[2]
        object_points_transformed = transform_object_points(
            object_points_1,
            np.array([rotation_x, rotation_y, 0.0]),
            np.array([0.0, 0.0, translation_z]))
        return np.sum(np.square(object_points_transformed[:, 2] - height))
    optimization_solution = scipy.optimize.minimize(
        objective_function,
        np.array([0.0, 0.0, 0.0]))
    # Candidate (a): the optimizer's solution.
    rotation_x_a = optimization_solution['x'][0]
    rotation_y_a = optimization_solution['x'][1]
    translation_z_a = optimization_solution['x'][2]
    # Candidate (b): the mirror solution (rotation extended by pi, z negated).
    rotation_x_rotation_y_a_norm = np.linalg.norm([rotation_x_a, rotation_y_a])
    rotation_x_b = rotation_x_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm)
    rotation_y_b = rotation_y_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm)
    translation_z_b = - translation_z_a
    rotation_vector_2_a = np.array([rotation_x_a, rotation_y_a, 0.0])
    translation_vector_2_a = np.array([0.0, 0.0, translation_z_a])
    object_points_2_a = transform_object_points(
        object_points_1,
        rotation_vector_2_a,
        translation_vector_2_a)
    rotation_vector_2_b = np.array([rotation_x_b, rotation_y_b, 0.0])
    translation_vector_2_b = np.array([0.0, 0.0, translation_z_b])
    object_points_2_b = transform_object_points(
        object_points_1,
        rotation_vector_2_b,
        translation_vector_2_b)
    # Choose the candidate whose x-axis / y-reference cross product has the
    # requested sign (z component), i.e. the requested handedness.
    sign_a = np.sign(
        np.cross(
            np.subtract(
                object_points_2_a[x_axis_index],
                object_points_2_a[origin_index]),
            np.subtract(
                object_points_2_a[y_reference_point],
                object_points_2_a[origin_index]))[2])
    sign_b = np.sign(
        np.cross(
            np.subtract(
                object_points_2_b[x_axis_index],
                object_points_2_b[origin_index]),
            np.subtract(
                object_points_2_b[y_reference_point],
                object_points_2_b[origin_index]))[2])
    if sign_a == y_reference_point_sign:
        rotation_vector_2 = rotation_vector_2_a
        translation_vector_2 = translation_vector_2_a
        object_points_2 = object_points_2_a
    else:
        rotation_vector_2 = rotation_vector_2_b
        translation_vector_2 = translation_vector_2_b
        object_points_2 = object_points_2_b
    # Shift so the chosen origin point sits at (0, 0) in x/y.
    xy_shift = - object_points_2[origin_index, :2]
    rotation_vector_3 = np.array([0.0, 0.0, 0.0])
    translation_vector_3 = np.array([xy_shift[0], xy_shift[1], 0.0])
    object_points_3 = transform_object_points(
        object_points_2,
        rotation_vector_3,
        translation_vector_3)
    # Rotate about z so the x-axis point lies on the +x axis.
    final_z_rotation = - reconstruct_z_rotation(
        object_points_3[x_axis_index, 0],
        object_points_3[x_axis_index, 1])
    rotation_vector_4 = np.array([0.0, 0.0, final_z_rotation])
    translation_vector_4 = np.array([0.0, 0.0, 0.0])
    object_points_4 = transform_object_points(
        object_points_3,
        rotation_vector_4,
        translation_vector_4)
    # Chain the alignment transforms and invert to get the camera's pose.
    rotation_vector_2_3, translation_vector_2_3 = compose_transformations(
        rotation_vector_2,
        translation_vector_2,
        rotation_vector_3,
        translation_vector_3)
    rotation_vector_2_3_4, translation_vector_2_3_4 = compose_transformations(
        rotation_vector_2_3,
        translation_vector_2_3,
        rotation_vector_4,
        translation_vector_4)
    camera_rotation_vector, camera_translation_vector = invert_transformation(
        rotation_vector_2_3_4,
        translation_vector_2_3_4)
    return camera_rotation_vector, camera_translation_vector, scale_factor, object_points_4
def estimate_camera_poses_from_plane_image_points(
        image_points_1,
        image_points_2,
        camera_matrix,
        height,
        origin_index,
        x_axis_index,
        y_reference_point,
        y_reference_point_sign,
        distance_calibration_indices,
        calibration_distance):
    """Estimate both camera poses from correspondences on a known plane.

    Recovers the relative camera pose and an unscaled reconstruction from the
    correspondences, anchors the first camera against the plane constraints,
    then rescales the relative translation and composes to obtain the second
    camera's pose.

    Raises:
        ValueError: if either point set is empty or the sets differ in shape.
    """
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    if image_points_1.size == 0 or image_points_2.size == 0:
        raise ValueError('One or both sets of image points appear to be empty')
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError('Sets of image points do not appear to be the same shape')
    camera_matrix = np.asarray(camera_matrix).reshape((3, 3))
    relative_rotation, relative_translation = estimate_camera_pose_from_image_points(
        image_points_1,
        image_points_2,
        camera_matrix)
    unscaled_object_points = reconstruct_object_points_from_image_points(
        image_points_1,
        image_points_2,
        camera_matrix)
    plane_pose = estimate_camera_pose_from_plane_object_points(
        unscaled_object_points,
        height,
        origin_index,
        x_axis_index,
        y_reference_point,
        y_reference_point_sign,
        distance_calibration_indices,
        calibration_distance)
    rotation_vector_1, translation_vector_1, scale_factor = plane_pose[:3]
    # The relative translation from the essential matrix is unit length;
    # bring it into the same scale as the plane-anchored reconstruction.
    rotation_vector_2, translation_vector_2 = compose_transformations(
        rotation_vector_1,
        translation_vector_1,
        relative_rotation,
        relative_translation * scale_factor)
    return rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2
| true
| true
|
f70422428d41dc6563266192de2998e6f21fc6af
| 4,188
|
py
|
Python
|
plugins/esni/client.py
|
tgragnato/geneva
|
2fc5b2f2f4766278902cff25af50b753d1d26a76
|
[
"BSD-3-Clause"
] | 1,182
|
2019-11-15T02:56:47.000Z
|
2022-03-30T16:09:04.000Z
|
plugins/esni/client.py
|
Nekotekina/geneva
|
3eb6b7342f9afd7add1f4aba9e2aadf0b9a5f196
|
[
"BSD-3-Clause"
] | 21
|
2019-11-15T15:08:02.000Z
|
2022-01-03T16:22:45.000Z
|
plugins/esni/client.py
|
Nekotekina/geneva
|
3eb6b7342f9afd7add1f4aba9e2aadf0b9a5f196
|
[
"BSD-3-Clause"
] | 102
|
2019-11-15T15:01:07.000Z
|
2022-03-30T13:52:47.000Z
|
"""
Client
Run by the evaluator, sends a TLS Client Hello with the ESNI extension, followed by two test packets.
"""
import argparse
import binascii as bi
import os
import socket
import time
socket.setdefaulttimeout(1)
from plugins.plugin_client import ClientPlugin
class ESNIClient(ClientPlugin):
    """
    Defines the ESNI client.

    Sends a TLS Client Hello carrying the ESNI extension, followed by two
    plain test packets, and scores the attempt as a fitness value.
    """
    name = "esni"

    def __init__(self, args):
        """
        Initializes the esni client.
        """
        ClientPlugin.__init__(self)
        self.args = args

    @staticmethod
    def get_args(command):
        """
        Defines required args for this plugin.

        Extends the base ClientPlugin args with a --server option.
        """
        super_args = ClientPlugin.get_args(command)
        parser = argparse.ArgumentParser(description='ESNI Client')
        parser.add_argument('--server', action='store', help="server to connect to")
        args, _ = parser.parse_known_args(command)
        args = vars(args)
        super_args.update(args)
        return super_args

    def run(self, args, logger, engine=None):
        """
        Try to make a forbidden GET request to the server.

        Returns a fitness score (scaled x4): positive when the server
        responded, negative on timeout or socket/other errors.
        """
        fitness = 0
        port = int(args["port"])
        server = args["server"]
        # Pre-built TLS 1.3 Client Hello (hex-encoded) that includes the ESNI
        # extension.
        msg = b'16030103ae010003aa0303d992f9c22fbe7a7cdbc9619924bd9cc13c057f5f3da1829426cb0944292705152033c5be80af6de7633e07680125e27e3f7b80ff5e9b3cbe5278434c90b9e0e5fa0024130113031302c02bc02fcca9cca8c02cc030c00ac009c013c014009c009d002f0035000a0100033d00170000ff01000100000a000e000c001d00170018001901000101000b000201000010000e000c02683208687474702f312e310005000501000000000033006b0069001d002019570ada256d971048b34d3e9ff5607588bf10cfb6c064fc45a0fc401d9a7c470017004104ea047fd2e0fc3314de4bf03ee6205134f0d15c07f62b77625a95dc194ce8fb88cc16e53c8b400ba463915b87480b247851c095abdb0d3d5d5b14dd77dcd73750002b00050403040303000d0018001604030503060308040805080604010501060102030201002d00020101ffce016e1301001d00203652aaf122dc47dcf9fa8c37377476d050e54119adfb518f7aabd842ac97d23b00205a30e70593f57708370310ecf7054e488a62eb11e01fd059851c442d453d15c5012441910eec152c4df5ff28bf5cddb1a2e54e8595197e3dc36325145ad50a7842eb3860c8fc6ac5c1794017101365c6122abb3b81f31f5f4204eebb244252d22600734424d875948657b892d3aab3310491aff3b5126f1186bd9c321fb446cf2a41985dd206364ea28c3f8aafeafc62e039f157c3f2703a35448d2d16dcf2d5055ce58c024a5b4eb780fc5128af4ba4e90d6eef1b3cf30a5b2000448d65d6af4fffabeb91e1ed2093fdcc6ffd87ceb94429864ddb657e6316654631193fd25840e51645e1708d351140dd6eeefb80ddbaebb250b2975a1d5f291d99f89de4553d083f1b9820a3ee6976357cff433b7eb77febb3eb0db012154154d3e19b4409f8afa11aa1baeb0b7663d97f0caca2b11ed971fc574588e76a37aa4259593fe8e07fbbca27fa001c00024001002900eb00c600c07f87fafe9de4168227aeec4540f1aaeae43ff61a353f5480420ac3c33f90003fe6f501080bf04f22576a0cc1db8dc83d37b25859a81ce0277364a1794cde1c60f3b94175477beff56db7f9e2b83b31383b7d8b5da20834fb0a63d7ba2e42ad3dfa21666ed8621f34273ac5c273d7f492750e3df3bae36e398ddf83d4a7c36f639087f14eb1f7bfb2c7c0c736d69bcdbf21158c07b7088b95e5bcd08138d6b511f6492d7d93bb3729641519097b970cfeffa5882c67111dcf5d7966a1c58b4edb6e8c905a002120e47ccba37d89e4c1d979c6ef954d1cd946eff0d3119aa2b4d6411138aec74579'
        try:
            client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            client.settimeout(5)
            client.connect((server, port))
            client.sendall(bi.unhexlify(msg))
            time.sleep(2)
            client.sendall(b"test packet")
            time.sleep(2)
            client.sendall(b"test packet 2")
            server_data = client.recv(1024)
            # Typo fix: "recieved" -> "received" in the debug message.
            logger.debug("Data received: %s", server_data.decode('utf-8', 'ignore'))
            fitness += 100
            client.close()
        except socket.timeout:
            # Happens on connect, not sendall
            logger.debug("Client: Timeout")
            fitness -= 110
        except socket.error:
            # The unused `as exc` binding was dropped; logger.exception
            # records the traceback anyway.
            fitness -= 100
            logger.exception("Socket error caught in client esni test.")
        except Exception:
            logger.exception("Exception caught in client esni test.")
            fitness = -120
        finally:
            logger.debug("Client finished esni test.")
        return fitness * 4
| 51.073171
| 1,911
| 0.773878
|
import argparse
import binascii as bi
import os
import socket
import time
socket.setdefaulttimeout(1)
from plugins.plugin_client import ClientPlugin
class ESNIClient(ClientPlugin):
name = "esni"
def __init__(self, args):
ClientPlugin.__init__(self)
self.args = args
@staticmethod
def get_args(command):
super_args = ClientPlugin.get_args(command)
parser = argparse.ArgumentParser(description='ESNI Client')
parser.add_argument('--server', action='store', help="server to connect to")
args, _ = parser.parse_known_args(command)
args = vars(args)
super_args.update(args)
return super_args
def run(self, args, logger, engine=None):
fitness = 0
port = int(args["port"])
server = args["server"]
msg = b'16030103ae010003aa0303d992f9c22fbe7a7cdbc9619924bd9cc13c057f5f3da1829426cb0944292705152033c5be80af6de7633e07680125e27e3f7b80ff5e9b3cbe5278434c90b9e0e5fa0024130113031302c02bc02fcca9cca8c02cc030c00ac009c013c014009c009d002f0035000a0100033d00170000ff01000100000a000e000c001d00170018001901000101000b000201000010000e000c02683208687474702f312e310005000501000000000033006b0069001d002019570ada256d971048b34d3e9ff5607588bf10cfb6c064fc45a0fc401d9a7c470017004104ea047fd2e0fc3314de4bf03ee6205134f0d15c07f62b77625a95dc194ce8fb88cc16e53c8b400ba463915b87480b247851c095abdb0d3d5d5b14dd77dcd73750002b00050403040303000d0018001604030503060308040805080604010501060102030201002d00020101ffce016e1301001d00203652aaf122dc47dcf9fa8c37377476d050e54119adfb518f7aabd842ac97d23b00205a30e70593f57708370310ecf7054e488a62eb11e01fd059851c442d453d15c5012441910eec152c4df5ff28bf5cddb1a2e54e8595197e3dc36325145ad50a7842eb3860c8fc6ac5c1794017101365c6122abb3b81f31f5f4204eebb244252d22600734424d875948657b892d3aab3310491aff3b5126f1186bd9c321fb446cf2a41985dd206364ea28c3f8aafeafc62e039f157c3f2703a35448d2d16dcf2d5055ce58c024a5b4eb780fc5128af4ba4e90d6eef1b3cf30a5b2000448d65d6af4fffabeb91e1ed2093fdcc6ffd87ceb94429864ddb657e6316654631193fd25840e51645e1708d351140dd6eeefb80ddbaebb250b2975a1d5f291d99f89de4553d083f1b9820a3ee6976357cff433b7eb77febb3eb0db012154154d3e19b4409f8afa11aa1baeb0b7663d97f0caca2b11ed971fc574588e76a37aa4259593fe8e07fbbca27fa001c00024001002900eb00c600c07f87fafe9de4168227aeec4540f1aaeae43ff61a353f5480420ac3c33f90003fe6f501080bf04f22576a0cc1db8dc83d37b25859a81ce0277364a1794cde1c60f3b94175477beff56db7f9e2b83b31383b7d8b5da20834fb0a63d7ba2e42ad3dfa21666ed8621f34273ac5c273d7f492750e3df3bae36e398ddf83d4a7c36f639087f14eb1f7bfb2c7c0c736d69bcdbf21158c07b7088b95e5bcd08138d6b511f6492d7d93bb3729641519097b970cfeffa5882c67111dcf5d7966a1c58b4edb6e8c905a002120e47ccba37d89e4c1d979c6ef954d1cd946eff0d3119aa2b4d6411138aec74579'
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(5)
client.connect((server, port))
client.sendall(bi.unhexlify(msg))
time.sleep(2)
client.sendall(b"test packet")
time.sleep(2)
client.sendall(b"test packet 2")
server_data = client.recv(1024)
logger.debug("Data recieved: %s", server_data.decode('utf-8', 'ignore'))
fitness += 100
client.close()
except socket.timeout:
logger.debug("Client: Timeout")
fitness -= 110
except socket.error as exc:
fitness -= 100
logger.exception("Socket error caught in client esni test.")
except Exception:
logger.exception("Exception caught in client esni test.")
fitness = -120
finally:
logger.debug("Client finished esni test.")
return fitness * 4
| true
| true
|
f7042265d6e1253b3102acc3edcb9d1e660f92e1
| 3,860
|
py
|
Python
|
ichnaea/scripts/dump.py
|
crankycoder/ichnaea
|
fb54000e92c605843b7a41521e36fd648c11ae94
|
[
"Apache-2.0"
] | 1
|
2019-05-12T05:51:19.000Z
|
2019-05-12T05:51:19.000Z
|
ichnaea/scripts/dump.py
|
crankycoder/ichnaea
|
fb54000e92c605843b7a41521e36fd648c11ae94
|
[
"Apache-2.0"
] | null | null | null |
ichnaea/scripts/dump.py
|
crankycoder/ichnaea
|
fb54000e92c605843b7a41521e36fd648c11ae94
|
[
"Apache-2.0"
] | null | null | null |
"""
Dump/export our own data to a local file.
Script is installed as `location_dump`.
"""
import argparse
import os
import os.path
import sys
from sqlalchemy import text
from ichnaea.db import (
configure_db,
db_worker_session,
)
from ichnaea.geocalc import bbox
from ichnaea.log import (
configure_logging,
LOGGER,
)
from ichnaea.models import (
BlueShard,
CellShard,
WifiShard,
)
from ichnaea import util
def where_area(lat, lon, radius):
    """Build a SQL WHERE fragment restricting rows to a bounding box.

    Returns None when any of lat/lon/radius is missing, meaning no
    area restriction should be applied.
    """
    if lat is None or lon is None or radius is None:
        return None
    max_lat, min_lat, max_lon, min_lon = bbox(lat, lon, radius)
    bounds = tuple(round(value, 5)
                   for value in (max_lat, min_lat, max_lon, min_lon))
    return ('`lat` <= %s and `lat` >= %s and '
            '`lon` <= %s and `lon` >= %s') % bounds
def dump_model(shard_model, session, fd, where=None):
    """Write every shard of shard_model to fd as CSV, header first.

    Rows are fetched in keyset-paginated batches ordered by export_key,
    optionally restricted by an extra SQL `where` fragment spliced into
    the shard's export statement.
    """
    fd.write(shard_model.export_header() + '\n')
    for model in shard_model.shards().values():
        LOGGER.info('Exporting table: %s', model.__tablename__)
        raw_stmt = model.export_stmt()
        if where:
            raw_stmt = raw_stmt.replace(' WHERE ', ' WHERE %s AND ' % where)
        stmt = text(raw_stmt)
        last_key = ''
        batch_size = 25000
        while True:
            rows = session.execute(
                stmt.bindparams(export_key=last_key, limit=batch_size)
            ).fetchall()
            if not rows:
                break
            chunk = '\n'.join(row.export_value for row in rows)
            if chunk:
                fd.write(chunk + '\n')
            # Resume the next batch after the last key we have seen.
            last_key = rows[-1].export_key
def dump_file(datatype, session, filename,
              lat=None, lon=None, radius=None):
    """Export one datatype ('blue', 'cell' or 'wifi') to a gzipped CSV file.

    Returns 0 on success (used as a process exit code by main()).
    """
    shard_models = {
        'blue': BlueShard,
        'cell': CellShard,
        'wifi': WifiShard,
    }
    area_clause = where_area(lat, lon, radius)
    with util.gzip_open(filename, 'w') as fd:
        dump_model(shard_models[datatype], session, fd, where=area_clause)
    return 0
def main(argv, _db=None, _dump_file=dump_file):
    """Parse command-line arguments and run the export; return an exit code."""
    parser = argparse.ArgumentParser(
        prog=argv[0], description='Dump/export data.')
    parser.add_argument('--datatype', required=True,
                        help='Type of the data file, blue, cell or wifi')
    parser.add_argument('--filename', required=True,
                        help='Path to the csv.gz export file.')
    parser.add_argument('--lat', default=None,
                        help='The center latitude of the desired area.')
    parser.add_argument('--lon', default=None,
                        help='The center longitude of the desired area.')
    parser.add_argument('--radius', default=None,
                        help='The radius of the desired area.')
    args = parser.parse_args(argv[1:])

    if not args.filename:  # pragma: no cover
        parser.print_help()
        return 1
    target = os.path.abspath(os.path.expanduser(args.filename))
    if os.path.isfile(target):  # pragma: no cover
        print('File already exists.')
        return 1
    if args.datatype not in ('blue', 'cell', 'wifi'):  # pragma: no cover
        print('Unknown data type.')
        return 1

    # The area filter applies only when all three values are supplied.
    lat = lon = radius = None
    if (args.lat is not None
            and args.lon is not None
            and args.radius is not None):
        lat = float(args.lat)
        lon = float(args.lon)
        radius = int(args.radius)

    configure_logging()
    db = configure_db('ro', transport='sync', _db=_db)
    with db_worker_session(db, commit=False) as session:
        exit_code = _dump_file(
            args.datatype, session, target, lat=lat, lon=lon, radius=radius)
    return exit_code
def console_entry(): # pragma: no cover
    """Console-script entry point: exit the process with main()'s return code."""
    sys.exit(main(sys.argv))
| 29.922481
| 76
| 0.589119
|
import argparse
import os
import os.path
import sys
from sqlalchemy import text
from ichnaea.db import (
configure_db,
db_worker_session,
)
from ichnaea.geocalc import bbox
from ichnaea.log import (
configure_logging,
LOGGER,
)
from ichnaea.models import (
BlueShard,
CellShard,
WifiShard,
)
from ichnaea import util
def where_area(lat, lon, radius):
if lat is None or lon is None or radius is None:
return None
max_lat, min_lat, max_lon, min_lon = bbox(lat, lon, radius)
return '`lat` <= %s and `lat` >= %s and `lon` <= %s and `lon` >= %s' % (
round(max_lat, 5), round(min_lat, 5),
round(max_lon, 5), round(min_lon, 5))
def dump_model(shard_model, session, fd, where=None):
fd.write(shard_model.export_header() + '\n')
for model in shard_model.shards().values():
LOGGER.info('Exporting table: %s', model.__tablename__)
stmt = model.export_stmt()
if where:
stmt = stmt.replace(' WHERE ', ' WHERE %s AND ' % where)
stmt = text(stmt)
min_key = ''
limit = 25000
while True:
rows = session.execute(
stmt.bindparams(
export_key=min_key,
limit=limit
)).fetchall()
if rows:
buf = '\n'.join([row.export_value for row in rows])
if buf:
buf += '\n'
fd.write(buf)
min_key = rows[-1].export_key
else:
break
def dump_file(datatype, session, filename,
lat=None, lon=None, radius=None):
model = {
'blue': BlueShard,
'cell': CellShard,
'wifi': WifiShard,
}
where = where_area(lat, lon, radius)
with util.gzip_open(filename, 'w') as fd:
dump_model(model[datatype], session, fd, where=where)
return 0
def main(argv, _db=None, _dump_file=dump_file):
parser = argparse.ArgumentParser(
prog=argv[0], description='Dump/export data.')
parser.add_argument('--datatype', required=True,
help='Type of the data file, blue, cell or wifi')
parser.add_argument('--filename', required=True,
help='Path to the csv.gz export file.')
parser.add_argument('--lat', default=None,
help='The center latitude of the desired area.')
parser.add_argument('--lon', default=None,
help='The center longitude of the desired area.')
parser.add_argument('--radius', default=None,
help='The radius of the desired area.')
args = parser.parse_args(argv[1:])
if not args.filename: parser.print_help()
return 1
filename = os.path.abspath(os.path.expanduser(args.filename))
if os.path.isfile(filename): print('File already exists.')
return 1
datatype = args.datatype
if datatype not in ('blue', 'cell', 'wifi'): print('Unknown data type.')
return 1
lat, lon, radius = (None, None, None)
if (args.lat is not None and
args.lon is not None and args.radius is not None):
lat = float(args.lat)
lon = float(args.lon)
radius = int(args.radius)
configure_logging()
db = configure_db('ro', transport='sync', _db=_db)
with db_worker_session(db, commit=False) as session:
exit_code = _dump_file(
datatype, session, filename, lat=lat, lon=lon, radius=radius)
return exit_code
def console_entry(): sys.exit(main(sys.argv))
| true
| true
|
f704245cfebd32dde87c35a40024697c586c21ce
| 430
|
py
|
Python
|
pi/python_scripts/read_arduino.py
|
jonathantobi/starcore-hackomation-2017
|
585cca88c60b33e87b217c02c5b86aafe658321f
|
[
"MIT"
] | null | null | null |
pi/python_scripts/read_arduino.py
|
jonathantobi/starcore-hackomation-2017
|
585cca88c60b33e87b217c02c5b86aafe658321f
|
[
"MIT"
] | null | null | null |
pi/python_scripts/read_arduino.py
|
jonathantobi/starcore-hackomation-2017
|
585cca88c60b33e87b217c02c5b86aafe658321f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import serial
import time

# Serial link to the Arduino that reports gas and fire sensor levels as a
# comma-separated line per reading.
ser = serial.Serial(
    port='/dev/ttyACM1',
    baudrate=9600,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS
)

while True:
    # NOTE(review): flush() flushes the *output* buffer; if the intent is to
    # drop stale input, reset_input_buffer() would be needed — confirm.
    ser.flush()
    reading = ser.readline().decode().strip()
    gas_level, fire_level = reading.split(",")
    print("gas-level: ", gas_level)
    print("fire-level: ", fire_level)
    time.sleep(1)
| 17.916667
| 42
| 0.574419
|
import serial
import time
ser = serial.Serial(
port = '/dev/ttyACM1',
baudrate = 9600,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS
)
while 1:
ser.flush()
line = ser.readline().decode().strip()
gas, fire = line.split(",")
print("gas-level: ", gas)
print("fire-level: ", fire)
time.sleep(1)
| true
| true
|
f70424690ab9ff0152bd6626bc23b9b94c8fdc03
| 12,043
|
py
|
Python
|
sparqlkernel/drawgraph.py
|
alexisdimi/sparql-kernel
|
7acd28810d48ef127a716f00bd76f67d59d7ba69
|
[
"BSD-3-Clause"
] | 93
|
2016-09-13T21:50:30.000Z
|
2022-02-13T09:46:40.000Z
|
sparqlkernel/drawgraph.py
|
alexisdimi/sparql-kernel
|
7acd28810d48ef127a716f00bd76f67d59d7ba69
|
[
"BSD-3-Clause"
] | 33
|
2017-03-30T10:12:32.000Z
|
2021-08-12T12:23:36.000Z
|
sparqlkernel/drawgraph.py
|
alexisdimi/sparql-kernel
|
7acd28810d48ef127a716f00bd76f67d59d7ba69
|
[
"BSD-3-Clause"
] | 18
|
2017-02-12T17:09:08.000Z
|
2022-02-02T08:32:48.000Z
|
"""
Convert an RDF graph into an image for displaying in the notebook, via GraphViz
It has two parts:
- conversion from rdf into dot language. Code based in rdflib.utils.rdf2dot
- rendering of the dot graph into an image. Code based on
ipython-hierarchymagic, which in turn bases it from Sphinx
See https://github.com/tkf/ipython-hierarchymagic
License for RDFLIB
------------------
Copyright (c) 2002-2015, RDFLib Team
See CONTRIBUTORS and http://github.com/RDFLib/rdflib
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Daniel Krech nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
License for ipython-hierarchymagic
----------------------------------
ipython-hierarchymagic is licensed under the term of the Simplified
BSD License (BSD 2-clause license), as follows:
Copyright (c) 2012 Takafumi Arakaki
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
License for Sphinx
------------------
`run_dot` function and `HierarchyMagic._class_name` method in this
extension heavily based on Sphinx code `sphinx.ext.graphviz.render_dot`
and `InheritanceGraph.class_name`.
Copyright notice for Sphinx can be found below.
Copyright (c) 2007-2011 by the Sphinx team (see AUTHORS file).
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import errno
import base64
import re
from io import StringIO
import rdflib
from .utils import escape
import logging
LOG = logging.getLogger(__name__)
# ------------------------------------------------------------------------
# RDF properties searched (in this order of iteration, but with no ranking
# between them) when looking for a human-readable label for a node; see label().
LABEL_PROPERTIES = [
    rdflib.RDFS.label,
    rdflib.URIRef('http://schema.org/name'),
    rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
    rdflib.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
    rdflib.URIRef("http://purl.org/dc/elements/1.1/title"),
    rdflib.URIRef("http://xmlns.com/foaf/0.1/name"),
    rdflib.URIRef("http://www.w3.org/2006/vcard/ns#fn"),
    rdflib.URIRef("http://www.w3.org/2006/vcard/ns#org"),
]
def label(x, gr, preferred_languages=None):
    '''
    @param x: graph entity
    @param gr (Graph): RDF graph
    @param preferred_languages (iterable): list of preferred language codes for
      the labels.

    Return the best available label in the graph for the passed entity.
    If a set of preferred languages is given, try them in order. If none is
    found, an arbitrary language will be chosen
    '''
    # Collect every candidate label, keyed by its language tag
    by_language = {}
    for label_prop in LABEL_PROPERTIES:
        for candidate in gr.objects(x, label_prop):
            by_language[candidate.language] = candidate

    if by_language:
        # Honor the caller's language preference, in order
        if preferred_languages is not None:
            for lang in preferred_languages:
                if lang in by_language:
                    return by_language[lang]
        # Fall back to an arbitrary language
        return by_language.popitem()[1]

    # No labels available. Try to generate a QNAME, or else, the string itself
    try:
        return gr.namespace_manager.compute_qname(x)[2].replace('_', ' ')
    except Exception:
        # Attempt to extract the trailing part of an URI
        match = re.search('([^/]+)$', x)
        return match.group(1).replace('_', ' ') if match else x
def rdf2dot(g, stream, opts=None):
    '''
    Convert the RDF graph to DOT and write the dot output to the stream.

    @param g (Graph): RDF graph to render
    @param stream: writable text stream receiving the DOT source
    @param opts (dict): rendering options:
        'lang' (list): restrict literal nodes to these language codes
        'literal': if truthy, include literal nodes in the drawing
    '''
    # Was a mutable default argument ({}); use None-sentinel instead
    if opts is None:
        opts = {}
    LOG.debug("RDF2DOT: %s", opts)
    accept_lang = opts.get('lang', [])
    do_literal = opts.get('literal')

    nodes = {}

    def node_id(x):
        # Assign a stable DOT identifier ("node0", "node1", ...) per entity
        if x not in nodes:
            nodes[x] = "node%d" % len(nodes)
        return nodes[x]

    def qname(x, g):
        # Compact an URI into prefix:localname; fall back to the raw value
        try:
            q = g.compute_qname(x)
            return q[0] + ":" + q[2]
        except Exception:
            return x

    def accept(node):
        # Decide whether a node appears in the drawing: URI/blank nodes always,
        # literals only when requested and (if a filter is set) language-matched
        if isinstance(node, (rdflib.URIRef, rdflib.BNode)):
            return True
        if not do_literal:
            return False
        return (not accept_lang) or (node.language in accept_lang)

    stream.write(u'digraph { \n node [ fontname="DejaVu Sans,Tahoma,Geneva,sans-serif" ] ; \n')

    # Write all edges. In the process make a list of all nodes
    for s, p, o in g:
        # skip triples for labels
        if p == rdflib.RDFS.label:
            continue

        # Create a link if both objects are graph nodes
        # (or, if literals are also included, if their languages match)
        if not (accept(s) and accept(o)):
            continue

        # add the nodes to the list
        sn = node_id(s)
        on = node_id(o)

        # add the link
        q = qname(p, g)
        if isinstance(p, rdflib.URIRef):
            opstr = u'\t%s -> %s [ arrowhead="open", color="#9FC9E560", fontsize=9, fontcolor="#204080", label="%s", href="%s", target="_other" ] ;\n' % (sn, on, q, p)
        else:
            opstr = u'\t%s -> %s [ arrowhead="open", color="#9FC9E560", fontsize=9, fontcolor="#204080", label="%s" ] ;\n' % (sn, on, q)
        stream.write(opstr)

    # Write all nodes collected while writing the edges
    for u, n in nodes.items():
        lbl = escape(label(u, g, accept_lang), True)
        if isinstance(u, rdflib.URIRef):
            opstr = u'%s [ shape=none, fontsize=10, fontcolor=%s, label="%s", href="%s", target=_other ] \n' % (n, 'blue', lbl, u)
        else:
            opstr = u'%s [ shape=none, fontsize=10, fontcolor=%s, label="%s" ] \n' % (n, 'black', lbl)
        stream.write(u"# %s %s\n" % (u, n))
        stream.write(opstr)

    stream.write(u'}\n')
# ------------------------------------------------------------------------
# errno values used to recognize broken-pipe / invalid-argument errors when
# talking to the GraphViz subprocess; 0 when the platform lacks the constant.
EPIPE = getattr(errno, 'EPIPE', 0)
EINVAL = getattr(errno, 'EINVAL', 0)
def run_dot(code, fmt='svg', gv_options=None, **kwargs):
    '''
    Run GraphViz on the buffer holding the graph.

    @param code (str): DOT source to render
    @param fmt (str): output format, passed to dot's -T switch
    @param gv_options (list): extra command-line options for GraphViz
    @param kwargs: 'prg' selects the GraphViz executable (default 'dot')
    @return (bytes): the rendered output
    @raise RuntimeError: when dot exits with a nonzero status
    '''
    # Was a mutable default argument ([]); use None-sentinel instead
    if gv_options is None:
        gv_options = []
    LOG.debug("rundot fmt=%s options=%s", fmt, gv_options)

    # mostly copied from sphinx.ext.graphviz.render_dot
    import os
    from subprocess import Popen, PIPE

    dot_args = [kwargs.get('prg', 'dot')] + gv_options + ['-T', fmt]
    if os.name == 'nt':
        # Avoid opening shell window.
        # * https://github.com/tkf/ipython-hierarchymagic/issues/1
        # * http://stackoverflow.com/a/2935727/727827
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE,
                  creationflags=0x08000000)
    else:
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)

    wentwrong = False
    try:
        # Graphviz may close standard input when an error occurs,
        # resulting in a broken pipe on communicate()
        stdout, stderr = p.communicate(code.encode('utf-8'))
    except OSError as err:
        if err.errno != EPIPE:
            raise
        wentwrong = True
    except IOError as err:
        if err.errno != EINVAL:
            raise
        wentwrong = True

    if wentwrong:
        # in this case, read the standard output and standard error streams
        # directly, to get the error message(s)
        stdout, stderr = p.stdout.read(), p.stderr.read()
        p.wait()

    if p.returncode != 0:
        raise RuntimeError(u'dot exited with error:\n[stderr]\n{0}'
                           .format(stderr.decode('utf-8')))
    return stdout
# ------------------------------------------------------------------------
def draw_graph(g, fmt='svg', prg='dot', options=None):
    '''
    Draw an RDF graph as an image.

    @param g (Graph): RDF graph to render
    @param fmt (str): output format, 'svg' or 'png'
    @param prg (str): GraphViz executable name
    @param options (dict): rdf2dot options, plus 'graphviz' (list) for extra
        GraphViz command-line options
    @return: a (data, metadata) pair suitable for Jupyter display
    '''
    # Was a mutable default argument ({}); use None-sentinel instead
    if options is None:
        options = {}

    # Convert RDF to Graphviz
    buf = StringIO()
    rdf2dot(g, buf, options)

    # Copy the option list so we never mutate the caller's options dict
    # (the original `gv_options += [...]` grew options['graphviz'] on every
    # png call)
    gv_options = list(options.get('graphviz', []))
    if fmt == 'png':
        gv_options += ['-Gdpi=220', '-Gsize=25,10!']
        metadata = {"width": 5500, "height": 2200, "unconfined": True}

    # Now use Graphviz to generate the graph.
    # BUGFIX: run_dot has no 'options' parameter; the previous call passed
    # options=..., which was swallowed by **kwargs and silently ignored.
    image = run_dot(buf.getvalue(), fmt=fmt, gv_options=gv_options, prg=prg)

    # Return it
    if fmt == 'png':
        return {'image/png': base64.b64encode(image).decode('ascii')}, \
               {'image/png': metadata}
    elif fmt == 'svg':
        img = image.decode('utf-8').replace('<svg', '<svg class="unconfined"', 1)
        return {'image/svg+xml': img}, \
               {'unconfined': True}
| 37.990536
| 167
| 0.663456
|
import errno
import base64
import re
from io import StringIO
import rdflib
from .utils import escape
import logging
LOG = logging.getLogger(__name__)
LABEL_PROPERTIES = [
rdflib.RDFS.label,
rdflib.URIRef('http://schema.org/name'),
rdflib.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
rdflib.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
rdflib.URIRef("http://purl.org/dc/elements/1.1/title"),
rdflib.URIRef("http://xmlns.com/foaf/0.1/name"),
rdflib.URIRef("http://www.w3.org/2006/vcard/ns#fn"),
rdflib.URIRef("http://www.w3.org/2006/vcard/ns#org"),
]
def label(x, gr, preferred_languages=None):
labels = {l.language: l
for labelProp in LABEL_PROPERTIES
for l in gr.objects(x, labelProp)}
if labels:
if preferred_languages is not None:
for l in preferred_languages:
if l in labels:
return labels[l]
return labels.popitem()[1]
try:
return gr.namespace_manager.compute_qname(x)[2].replace('_', ' ')
except Exception:
m = re.search('([^/]+)$', x)
return m.group(1).replace('_', ' ') if m else x
def rdf2dot(g, stream, opts={}):
LOG.debug("RDF2DOT: %s", opts)
accept_lang = opts.get('lang', [])
do_literal = opts.get('literal')
nodes = {}
def node_id(x):
if x not in nodes:
nodes[x] = "node%d" % len(nodes)
return nodes[x]
def qname(x, g):
try:
q = g.compute_qname(x)
return q[0] + ":" + q[2]
except Exception:
return x
def accept(node):
if isinstance(node, (rdflib.URIRef, rdflib.BNode)):
return True
if not do_literal:
return False
return (not accept_lang) or (node.language in accept_lang)
stream.write(u'digraph { \n node [ fontname="DejaVu Sans,Tahoma,Geneva,sans-serif" ] ; \n')
for s, p, o in g:
if p == rdflib.RDFS.label:
continue
if not (accept(s) and accept(o)):
continue
sn = node_id(s)
on = node_id(o)
q = qname(p, g)
if isinstance(p, rdflib.URIRef):
opstr = u'\t%s -> %s [ arrowhead="open", color="#9FC9E560", fontsize=9, fontcolor="#204080", label="%s", href="%s", target="_other" ] ;\n' % (sn, on, q, p)
else:
opstr = u'\t%s -> %s [ arrowhead="open", color="#9FC9E560", fontsize=9, fontcolor="#204080", label="%s" ] ;\n' % (sn, on, q)
stream.write(opstr)
for u, n in nodes.items():
lbl = escape(label(u, g, accept_lang), True)
if isinstance(u, rdflib.URIRef):
opstr = u'%s [ shape=none, fontsize=10, fontcolor=%s, label="%s", href="%s", target=_other ] \n' % (n, 'blue', lbl, u)
else:
opstr = u'%s [ shape=none, fontsize=10, fontcolor=%s, label="%s" ] \n' % (n, 'black', lbl)
stream.write(u"# %s %s\n" % (u, n))
stream.write(opstr)
stream.write(u'}\n')
EPIPE = getattr(errno, 'EPIPE', 0)
EINVAL = getattr(errno, 'EINVAL', 0)
def run_dot(code, fmt='svg', gv_options=[], **kwargs):
LOG.debug("rundot fmt=%s options=%s", fmt, gv_options)
import os
from subprocess import Popen, PIPE
dot_args = [kwargs.get('prg', 'dot')] + gv_options + ['-T', fmt]
if os.name == 'nt':
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE,
creationflags=0x08000000)
else:
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
wentwrong = False
try:
stdout, stderr = p.communicate(code.encode('utf-8'))
except OSError as err:
if err.errno != EPIPE:
raise
wentwrong = True
except IOError as err:
if err.errno != EINVAL:
raise
wentwrong = True
if wentwrong:
stdout, stderr = p.stdout.read(), p.stderr.read()
p.wait()
if p.returncode != 0:
raise RuntimeError(u'dot exited with error:\n[stderr]\n{0}'
.format(stderr.decode('utf-8')))
return stdout
def draw_graph(g, fmt='svg', prg='dot', options={}):
buf = StringIO()
rdf2dot(g, buf, options)
gv_options = options.get('graphviz', [])
if fmt == 'png':
gv_options += ['-Gdpi=220', '-Gsize=25,10!']
metadata = {"width": 5500, "height": 2200, "unconfined": True}
image = run_dot(buf.getvalue(), fmt=fmt, options=gv_options, prg=prg)
if fmt == 'png':
return {'image/png': base64.b64encode(image).decode('ascii')}, \
{'image/png': metadata}
elif fmt == 'svg':
img = image.decode('utf-8').replace('<svg', '<svg class="unconfined"', 1)
return {'image/svg+xml': img}, \
{'unconfined': True}
| true
| true
|
f704250e86256c23937f52f4509e295bb75c89c6
| 15,560
|
py
|
Python
|
genesis/genesis.py
|
nneveu/lume-genesis
|
2df9a246dcc7752c60f3439c651e35aaf81006d3
|
[
"Apache-2.0"
] | null | null | null |
genesis/genesis.py
|
nneveu/lume-genesis
|
2df9a246dcc7752c60f3439c651e35aaf81006d3
|
[
"Apache-2.0"
] | null | null | null |
genesis/genesis.py
|
nneveu/lume-genesis
|
2df9a246dcc7752c60f3439c651e35aaf81006d3
|
[
"Apache-2.0"
] | null | null | null |
"""
LUME-Genesis primary class
"""
from genesis import archive, lattice, parsers, tools, writers
import h5py
import tempfile
from time import time
import shutil
import os
def find_genesis2_executable(genesis_exe=None, verbose=False):
    """
    Searches for the genesis2 executable.

    Order: an explicit path, then $GENESIS_BIN, then 'genesis2' on PATH.

    @param genesis_exe: explicit path to the executable, or None to search
    @param verbose: print which executable was chosen
    @raise ValueError: when no executable can be found
    """
    if genesis_exe:
        exe = tools.full_path(genesis_exe)
        if os.path.exists(exe):
            if verbose:
                print(f'Using user provided executable: {exe}')
            return exe
        else:
            raise ValueError(f'Genesis executable does not exist: {exe}')
    for exe in [tools.full_path('$GENESIS_BIN'), shutil.which('genesis2')]:
        # BUGFIX: shutil.which returns None when nothing is found, and
        # os.path.exists(None) raises TypeError — skip falsy candidates.
        if exe and os.path.exists(exe):
            if verbose:
                print(f'Using found executable: {exe}')
            return exe
    # (typo fix: was 'No Genesisi executable found')
    raise ValueError('No Genesis executable found')
class Genesis:
    """
    LUME-Genesis class to parse input, run genesis, and parse output.

    By default, a temporary directory is created for working.
    """

    def __init__(self, input_file=None,
                 genesis_bin=None,
                 use_tempdir=True,
                 workdir=None,
                 verbose=False
                 ):
        """
        @param input_file: main Genesis input file to load, or None
        @param genesis_bin: path to the genesis2 executable (searched if None)
        @param use_tempdir: work in a temporary directory (cleaned up with
            this object)
        @param workdir: base directory for the working directory; must exist
        @param verbose: enable progress printing via vprint
        """
        # Save init
        self.original_input_file = input_file
        self.use_tempdir = use_tempdir
        self.workdir = workdir
        if workdir:
            assert os.path.exists(workdir), 'workdir does not exist: '+workdir
        self.verbose = verbose
        self.genesis_bin = find_genesis2_executable(genesis_bin, verbose=verbose)
        self.binary_prefixes = []  # For example, ['mpirun', '-n', '2']
        self.output = {}
        self.timeout = None  # seconds; None disables the run timeout

        # Run control (was assigned twice; the duplicate is removed)
        self.finished = False
        self.configured = False
        if input_file:
            self.load_input(input_file)
            self.configure()
        else:
            self.vprint('Warning: Input file does not exist. Not configured. Please call .load_input(input_file) and .configure()')
    def configure(self):
        """Configure the working directory using the stored workdir."""
        self.configure_genesis(workdir=self.workdir)

    def configure_genesis(self, input_filePath=None, workdir=None):
        """
        Configures working directory.

        @param input_filePath: optional main input file to (re)load first
        @param workdir: explicit working directory, used only when
            use_tempdir is False; otherwise a TemporaryDirectory is created.
        """
        if input_filePath:
            self.load_input(input_filePath)

        # Set paths
        if self.use_tempdir:
            # Need to attach this to the object. Otherwise it will go out of scope.
            self.tempdir = tempfile.TemporaryDirectory(dir=self.workdir)
            self.path = self.tempdir.name
        else:
            if workdir:
                self.path = workdir
                self.tempdir = None
            else:
                # Work in place
                self.path = self.original_path

        # Make full path
        self.input_file = os.path.join(self.path, 'genesis.in')

        self.vprint('Configured to run in:', self.path)
        self.configured = True
# Conveniences
@property
def beam(self):
return self.input['beam']
@property
def lattice(self):
try:
return self.input['lattice']
except:
print('No lattice found, assuming lattice is defined in input file.')
return None
@property
def param(self):
return self.input['param']
def load_input(self, filePath):
"""
Loads existing input file, with lattice
"""
assert os.path.exists(filePath), f'Input file does not exist: {filePath}'
f = tools.full_path(filePath)
self.original_path, self.input_file = os.path.split(f) # Get original path, name of main input
self.input = {
'beam':None
}
d = self.input
main = parsers.parse_main_inputfile(filePath)
d['param'] = main
if main['beamfile'] != '':
fname = main['beamfile']
d['beam'] = parsers.parse_beam_file(main['beamfile'], verbose=self.verbose)
# Use this new name
main['beamfile'] = parsers.POSSIBLE_INPUT_FILES['beamfile']
else:
d['beam'] = None
if main['maginfile'] != '':
self.load_lattice(filePath=main['maginfile'], verbose=self.verbose)
# Use this new name
main['maginfile'] = parsers.POSSIBLE_INPUT_FILES['maginfile']
else:
main['lattice'] = None
    def load_output(self, filePath=None):
        """
        Parse the main Genesis output file plus any auxiliary binary outputs
        (.dfl, .fld, .dpa, .par) found next to it.

        @param filePath: explicit output file; defaults to
            <path>/<outputfile> from the main parameters.
        """
        if not filePath:
            fname = os.path.join(self.path, self.param['outputfile'])
        else:
            fname = filePath
        if os.path.exists(fname):
            self.output.update(parsers.parse_genesis_out(fname))
            self.vprint('Loaded output:', fname)

        # Final field
        # NOTE(review): the aux loaders below assume parse_genesis_out put a
        # 'data' dict into self.output — confirm against parsers.
        dflfile = fname+'.dfl'
        if os.path.exists(dflfile):
            self.output['data']['dfl'] = parsers.parse_genesis_dfl(dflfile, self.param['ncar'])
            self.vprint('Loaded dfl:', dflfile)

        # Field history
        fldfile = fname+'.fld'
        if os.path.exists(fldfile):
            # Time independent is just one slice
            if self['itdp'] == 0:
                nslice = 1
            else:
                nslice = self.param['nslice']
            self.output['data']['fld'] = parsers.parse_genesis_fld(fldfile, self.param['ncar'], nslice)
            self.vprint('Loaded fld:', fldfile)

        # Final particles
        dpafile = fname+'.dpa'
        if os.path.exists(dpafile):
            self.output['data']['dpa'] = parsers.parse_genesis_dpa(dpafile, self.param['npart'])
            self.vprint('Loaded dpa:', dpafile)

        # Particle history
        parfile = fname+'.par'
        if os.path.exists(parfile):
            self.output['data']['par'] = parsers.parse_genesis_dpa(parfile, self.param['npart'])
            self.vprint('Loaded par:', parfile)

    def load_lattice(self, filePath=None, verbose=False):
        """
        loads an original Genesis-style lattice into a standard_lattice

        @param filePath: lattice file; defaults to <path>/<maginfile>
        NOTE(review): the `verbose` parameter is currently unused; messages go
        through self.vprint (controlled by self.verbose) instead.
        """
        if not filePath:
            fname = os.path.join(self.path, self.param['maginfile'])
        else:
            fname = filePath
        self.vprint('loading lattice: ', fname)
        lat = parsers.parse_genesis_lattice(fname)
        # Standardize
        lat['eles'] = lattice.standard_eles_from_eles(lat['eles'])
        self.input['lattice'] = lat
def write_beam(self, filePath=None):
if not self.beam:
return
if not filePath:
filePath = os.path.join(self.path, self.param['beamfile'])
writers.write_beam_file(filePath, self.beam, verbose=self.verbose)
    def write_input(self):
        """
        Writes all input files (main namelist, beam, lattice) into the
        working directory.
        """
        self.write_input_file()
        self.write_beam()
        self.write_lattice()

        # Write the run script
        self.get_run_script()

    def write_input_file(self):
        """
        Write parameters to main .in file
        """
        lines = tools.namelist_lines(self.param, start='$newrun', end='$end')
        with open(self.input_file, 'w') as f:
            for line in lines:
                f.write(line+'\n')
def write_lattice(self):
if not self.lattice:
self.input['lattice'] = None
else:
filePath = os.path.join(self.path, self.param['maginfile'])
print(self.path, self.param['maginfile'])
lattice.write_lattice(filePath, self.lattice)
self.vprint('Lattice written:', filePath)
    def write_wavefront(self, h5=None):
        """
        Write an openPMD wavefront from the dfl

        @param h5: open h5py handle or a file name (environment variables are
            expanded); defaults to a fingerprint-based file name.
        @return: the handle or file name written to (same as `h5` when given)
        """
        if not h5:
            h5 = 'genesis_wavefront_'+self.fingerprint()+'.h5'

        if isinstance(h5, str):
            fname = os.path.expandvars(h5)
            g = h5py.File(fname, 'w')
            self.vprint(f'Writing wavefront (dfl data) to file {fname}')
        else:
            g = h5

        dfl = self.output['data']['dfl']
        param = self.output['param']
        writers.write_openpmd_wavefront_h5(g, dfl=dfl, param=param)

        return h5
def get_run_script(self, write_to_path=True):
"""
Assembles the run script. Optionally writes a file 'run' with this line to path.
"""
_, infile = os.path.split(self.input_file)
runscript = [self.genesis_bin, infile]
# Allow for MPI commands
if len(self.binary_prefixes) > 0:
runscript = self.binary_prefixes + runscript
if write_to_path:
filename = os.path.join(self.path, 'run')
with open(filename, 'w') as f:
f.write(' '.join(runscript))
tools.make_executable(filename)
return runscript
    def run(self):
        """Run Genesis with the stored verbosity and timeout settings."""
        if not self.configured:
            print('not configured to run')
            return
        self.run_genesis(verbose=self.verbose, timeout=self.timeout)

    def run_genesis(self, verbose=False, parse_output=True, timeout=None):
        """
        Write all input, execute the genesis binary inside self.path, and
        (optionally) parse the output.

        Timing, the command line, and any error cause are recorded in
        self.output['run_info']. The current working directory is changed to
        self.path for the duration of the run and restored afterwards.

        @param verbose: currently unused here; vprint follows self.verbose
        @param parse_output: call load_output() after a successful run
        @param timeout: seconds; when set, runs via tools.execute2 with a
            timeout, otherwise streams output interactively
        """
        # Check that binary exists
        self.genesis_bin = tools.full_path(self.genesis_bin)
        assert os.path.exists(self.genesis_bin), 'Genesis binary does not exist: '+ self.genesis_bin

        # Clear old output
        self.output = {}
        run_info = self.output['run_info'] = {}
        t1 = time()
        run_info['start_time'] = t1

        # Move to local directory
        # Save init dir
        init_dir = os.getcwd()
        self.vprint('init dir: ', init_dir)
        os.chdir(self.path)
        # Debugging
        self.vprint('Running genesis in '+os.getcwd())

        # Write all input
        self.write_input()

        runscript = self.get_run_script()
        run_info['run_script'] = ' '.join(runscript)

        try:
            if timeout:
                res = tools.execute2(runscript, timeout=timeout)
                log = res['log']
                self.error = res['error']
                run_info['why_error'] = res['why_error']
            else:
                # Interactive output, for Jupyter
                log = []
                for path in tools.execute(runscript):
                    self.vprint(path, end="")
                    log.append(path)
                # NOTE(review): indentation of the next two lines was
                # ambiguous in the reviewed copy; placed inside the
                # interactive branch so the timeout branch's error flag is
                # not clobbered — confirm against upstream.
                self.log = log
                self.error = False
            if parse_output:
                self.load_output()
        except Exception as ex:
            print('Run Aborted', ex)
            self.error = True
            run_info['why_error'] = str(ex)
        finally:
            run_info['run_time'] = time() - t1
            run_info['run_error'] = self.error
            # Return to init_dir
            os.chdir(init_dir)

        self.finished = True
    def fingerprint(self):
        """
        Data fingerprint using the input.
        """
        return tools.fingerprint(self.input)

    def vprint(self, *args, **kwargs):
        # Verbose print: forwards to print() only when self.verbose is set
        if self.verbose:
            print(*args, **kwargs)

    def input_twiss(self):
        """Return the input Twiss parameters derived from the beam sizes.

        beta = r**2 * gamma0 / emit per plane; alphas are read directly.
        """
        betax = self['rxbeam']**2 * self['gamma0'] / self['emitx']
        betay = self['rybeam']**2 * self['gamma0'] / self['emity']
        alphax = self['alphax']
        alphay = self['alphay']
        return {'betax':betax, 'betay':betay, 'alphax':alphax, 'alphay':alphay}
    def archive(self, h5=None):
        """
        Archive all data to an h5 handle or filename.

        If no file is given, a file based on the fingerprint will be created.

        @param h5: open h5py handle or a file name (environment variables
            are expanded)
        @return: the handle or file name archived to
        """
        if not h5:
            h5 = 'genesis_'+self.fingerprint()+'.h5'

        if isinstance(h5, str):
            fname = os.path.expandvars(h5)
            g = h5py.File(fname, 'w')
            self.vprint(f'Archiving to file {fname}')
        else:
            g = h5

        # Write basic attributes
        archive.genesis_init(g)

        # All input
        archive.write_input_h5(g, self.input, name='input')

        # All output
        archive.write_output_h5(g, self.output, name='output', verbose=self.verbose)

        return h5

    def load_archive(self, h5, configure=True):
        """
        Loads input and output from archived h5 file.

        See: Genesis.archive

        @param h5: open h5py handle or a file name; a file may contain either
            a single named archive group or (legacy) top-level data
        @param configure: re-run configure() after loading
        @raise ValueError: when the file contains multiple archive groups
        """
        if isinstance(h5, str):
            fname = os.path.expandvars(h5)
            g = h5py.File(fname, 'r')
            glist = archive.find_genesis_archives(g)
            n = len(glist)
            if n == 0:
                # legacy: try top level
                message = 'legacy'
            elif n == 1:
                gname = glist[0]
                message = f'group {gname} from'
                g = g[gname]
            else:
                raise ValueError(f'Multiple archives found in file {fname}: {glist}')
            self.vprint(f'Reading {message} archive file {h5}')
        else:
            g = h5

        self.input = archive.read_input_h5(g['input'])
        self.output = archive.read_output_h5(g['output'], verbose=self.verbose)

        self.vprint('Loaded from archive. Note: Must reconfigure to run again.')
        self.configured = False

        if configure:
            self.configure()
def copy(self):
"""
Returns a deep copy of this object.
If a tempdir is being used, will clear this and deconfigure.
"""
G2 = deepcopy(self)
# Clear this
if G2.use_tempdir:
G2.path = None
G2.configured = False
return G2
def __getitem__(self, key):
"""
Convenience syntax to get an attribute
See: __setitem__
"""
if key in self.param:
return self.param[key]
raise ValueError(f'{key} does not exist in input param')
def __setitem__(self, key, item):
"""
Convenience syntax to set input parameters
Example:
G['ncar'] = 251
"""
if key in self.param:
self.param[key] = item
else:
raise ValueError(f'{key} does not exist in input param')
def __str__(self):
path = self.path
s = ''
if self.finished:
s += 'Genesis finished in '+path
elif self.configured:
s += 'Genesis configured in '+path
else:
s += 'Genesis not configured.'
return s
| 29.303202
| 131
| 0.507326
|
from genesis import archive, lattice, parsers, tools, writers
import h5py
import tempfile
from time import time
import shutil
import os
def find_genesis2_executable(genesis_exe=None, verbose=False):
if genesis_exe:
exe = tools.full_path(genesis_exe)
if os.path.exists(exe):
if verbose:
print(f'Using user provided executable: {exe}')
return exe
else:
raise ValueError(f'Genesis executable does not exist: {exe}')
for exe in [tools.full_path('$GENESIS_BIN'), shutil.which('genesis2')]:
if os.path.exists(exe):
if verbose:
print(f'Using found executable: {exe}')
return exe
raise ValueError('No Genesisi executable found')
class Genesis:
def __init__(self, input_file=None,
genesis_bin=None,
use_tempdir=True,
workdir=None,
verbose=False
):
self.original_input_file = input_file
self.use_tempdir = use_tempdir
self.workdir = workdir
if workdir:
assert os.path.exists(workdir), 'workdir does not exist: '+workdir
self.verbose=verbose
self.genesis_bin = find_genesis2_executable(genesis_bin, verbose=verbose)
self.binary_prefixes = [] self.finished = False
self.output = {}
self.timeout = None
self.finished = False
self.configured = False
if input_file:
self.load_input(input_file)
self.configure()
else:
self.vprint('Warning: Input file does not exist. Not configured. Please call .load_input(input_file) and .configure()')
def configure(self):
self.configure_genesis(workdir=self.workdir)
def configure_genesis(self, input_filePath=None, workdir=None):
if input_filePath:
self.load_input(input_filePath)
if self.use_tempdir:
self.tempdir = tempfile.TemporaryDirectory(dir=self.workdir)
self.path = self.tempdir.name
else:
if workdir:
self.path = workdir
self.tempdir = None
else:
self.path = self.original_path
self.input_file = os.path.join(self.path, 'genesis.in')
self.vprint('Configured to run in:', self.path)
self.configured = True
@property
def beam(self):
return self.input['beam']
@property
def lattice(self):
try:
return self.input['lattice']
except:
print('No lattice found, assuming lattice is defined in input file.')
return None
@property
def param(self):
return self.input['param']
def load_input(self, filePath):
assert os.path.exists(filePath), f'Input file does not exist: {filePath}'
f = tools.full_path(filePath)
self.original_path, self.input_file = os.path.split(f)
self.input = {
'beam':None
}
d = self.input
main = parsers.parse_main_inputfile(filePath)
d['param'] = main
if main['beamfile'] != '':
fname = main['beamfile']
d['beam'] = parsers.parse_beam_file(main['beamfile'], verbose=self.verbose)
main['beamfile'] = parsers.POSSIBLE_INPUT_FILES['beamfile']
else:
d['beam'] = None
if main['maginfile'] != '':
self.load_lattice(filePath=main['maginfile'], verbose=self.verbose)
main['maginfile'] = parsers.POSSIBLE_INPUT_FILES['maginfile']
else:
main['lattice'] = None
def load_output(self, filePath=None):
if not filePath:
fname = os.path.join(self.path, self.param['outputfile'])
else:
fname = filePath
if os.path.exists(fname):
self.output.update(parsers.parse_genesis_out(fname))
self.vprint('Loaded output:', fname)
dflfile = fname+'.dfl'
if os.path.exists(dflfile):
self.output['data']['dfl'] = parsers.parse_genesis_dfl(dflfile, self.param['ncar'])
self.vprint('Loaded dfl:', dflfile)
fldfile = fname+'.fld'
if os.path.exists(fldfile):
if self['itdp'] == 0:
nslice = 1
else:
nslice = self.param['nslice']
self.output['data']['fld'] = parsers.parse_genesis_fld(fldfile, self.param['ncar'], nslice)
self.vprint('Loaded fld:', fldfile)
dpafile = fname+'.dpa'
if os.path.exists(dpafile):
self.output['data']['dpa'] = parsers.parse_genesis_dpa(dpafile, self.param['npart'])
self.vprint('Loaded dpa:', dpafile)
parfile = fname+'.par'
if os.path.exists(parfile):
self.output['data']['par'] = parsers.parse_genesis_dpa(parfile, self.param['npart'])
self.vprint('Loaded par:', parfile)
def load_lattice(self, filePath=None, verbose=False):
if not filePath:
fname = os.path.join(self.path, self.param['maginfile'])
else:
fname = filePath
self.vprint('loading lattice: ', fname)
lat = parsers.parse_genesis_lattice(fname)
lat['eles'] = lattice.standard_eles_from_eles(lat['eles'])
self.input['lattice'] = lat
def write_beam(self, filePath=None):
if not self.beam:
return
if not filePath:
filePath = os.path.join(self.path, self.param['beamfile'])
writers.write_beam_file(filePath, self.beam, verbose=self.verbose)
def write_input(self):
self.write_input_file()
self.write_beam()
self.write_lattice()
self.get_run_script()
def write_input_file(self):
lines = tools.namelist_lines(self.param, start='$newrun', end='$end')
with open(self.input_file, 'w') as f:
for line in lines:
f.write(line+'\n')
def write_lattice(self):
if not self.lattice:
self.input['lattice'] = None
else:
filePath = os.path.join(self.path, self.param['maginfile'])
print(self.path, self.param['maginfile'])
lattice.write_lattice(filePath, self.lattice)
self.vprint('Lattice written:', filePath)
def write_wavefront(self, h5=None):
if not h5:
h5 = 'genesis_wavefront_'+self.fingerprint()+'.h5'
if isinstance(h5, str):
fname = os.path.expandvars(h5)
g = h5py.File(fname, 'w')
self.vprint(f'Writing wavefront (dfl data) to file {fname}')
else:
g = h5
dfl = self.output['data']['dfl']
param = self.output['param']
writers.write_openpmd_wavefront_h5(g, dfl=dfl, param=param)
return h5
def get_run_script(self, write_to_path=True):
_, infile = os.path.split(self.input_file)
runscript = [self.genesis_bin, infile]
if len(self.binary_prefixes) > 0:
runscript = self.binary_prefixes + runscript
if write_to_path:
filename = os.path.join(self.path, 'run')
with open(filename, 'w') as f:
f.write(' '.join(runscript))
tools.make_executable(filename)
return runscript
def run(self):
if not self.configured:
print('not configured to run')
return
self.run_genesis(verbose=self.verbose, timeout=self.timeout)
def run_genesis(self, verbose=False, parse_output=True, timeout=None):
self.genesis_bin = tools.full_path(self.genesis_bin)
assert os.path.exists(self.genesis_bin), 'Genesis binary does not exist: '+ self.genesis_bin
self.output = {}
run_info = self.output['run_info'] = {}
t1 = time()
run_info['start_time'] = t1
init_dir = os.getcwd()
self.vprint('init dir: ', init_dir)
os.chdir(self.path)
self.vprint('Running genesis in '+os.getcwd())
self.write_input()
runscript = self.get_run_script()
run_info['run_script'] = ' '.join(runscript)
try:
if timeout:
res = tools.execute2(runscript, timeout=timeout)
log = res['log']
self.error = res['error']
run_info['why_error'] = res['why_error']
else:
log = []
for path in tools.execute(runscript):
self.vprint(path, end="")
log.append(path)
self.log = log
self.error = False
if parse_output:
self.load_output()
except Exception as ex:
print('Run Aborted', ex)
self.error = True
run_info['why_error'] = str(ex)
finally:
run_info['run_time'] = time() - t1
run_info['run_error'] = self.error
os.chdir(init_dir)
self.finished = True
def fingerprint(self):
return tools.fingerprint(self.input)
def vprint(self, *args, **kwargs):
if self.verbose:
print(*args, **kwargs)
def input_twiss(self):
betax = self['rxbeam']**2 * self['gamma0'] / self['emitx']
betay = self['rybeam']**2 * self['gamma0'] / self['emity']
alphax = self['alphax']
alphay = self['alphay']
return {'betax':betax, 'betay':betay, 'alphax':alphax, 'alphay':alphay}
def archive(self, h5=None):
if not h5:
h5 = 'genesis_'+self.fingerprint()+'.h5'
if isinstance(h5, str):
fname = os.path.expandvars(h5)
g = h5py.File(fname, 'w')
self.vprint(f'Archiving to file {fname}')
else:
g = h5
archive.genesis_init(g)
archive.write_input_h5(g, self.input, name='input')
archive.write_output_h5(g, self.output, name='output', verbose=self.verbose)
return h5
def load_archive(self, h5, configure=True):
if isinstance(h5, str):
fname = os.path.expandvars(h5)
g = h5py.File(fname, 'r')
glist = archive.find_genesis_archives(g)
n = len(glist)
if n == 0:
message = 'legacy'
elif n == 1:
gname = glist[0]
message = f'group {gname} from'
g = g[gname]
else:
raise ValueError(f'Multiple archives found in file {fname}: {glist}')
self.vprint(f'Reading {message} archive file {h5}')
else:
g = h5
self.input = archive.read_input_h5(g['input'])
self.output = archive.read_output_h5(g['output'], verbose=self.verbose)
self.vprint('Loaded from archive. Note: Must reconfigure to run again.')
self.configured = False
if configure:
self.configure()
def copy(self):
G2 = deepcopy(self)
if G2.use_tempdir:
G2.path = None
G2.configured = False
return G2
def __getitem__(self, key):
if key in self.param:
return self.param[key]
raise ValueError(f'{key} does not exist in input param')
def __setitem__(self, key, item):
if key in self.param:
self.param[key] = item
else:
raise ValueError(f'{key} does not exist in input param')
def __str__(self):
path = self.path
s = ''
if self.finished:
s += 'Genesis finished in '+path
elif self.configured:
s += 'Genesis configured in '+path
else:
s += 'Genesis not configured.'
return s
| true
| true
|
f70426e7636a41481d4afd382f74991b955ea9c2
| 527
|
py
|
Python
|
tools/telemetry/telemetry/core/backends/chrome/websocket.py
|
nagineni/chromium-crosswalk
|
5725642f1c67d0f97e8613ec1c3e8107ab53fdf8
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2015-03-04T02:36:53.000Z
|
2016-06-25T11:22:17.000Z
|
tools/telemetry/telemetry/core/backends/chrome/websocket.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/telemetry/telemetry/core/backends/chrome/websocket.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4
|
2015-02-09T08:49:30.000Z
|
2017-08-26T02:03:34.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from telemetry.core import util

# Make the bundled websocket-client package importable before importing
# names from it below.
util.AddDirToPythonPath(
    util.GetTelemetryDir(), 'third_party', 'websocket-client')

# Re-exported for callers of this shim module; unused-import warnings are
# suppressed deliberately.
from websocket import create_connection  # pylint: disable=W0611
from websocket import WebSocketException  # pylint: disable=W0611
from websocket import WebSocketTimeoutException  # pylint: disable=W0611
| 40.538462
| 72
| 0.806452
|
from __future__ import absolute_import
from telemetry.core import util
util.AddDirToPythonPath(
util.GetTelemetryDir(), 'third_party', 'websocket-client')
from websocket import create_connection from websocket import WebSocketException from websocket import WebSocketTimeoutException
| true
| true
|
f704278c8ede91d34c07a8b24640a36ec58b289c
| 1,227
|
py
|
Python
|
tests/test_visitors/test_ast/test_keywords/test_base_exception.py
|
bekemaydin/wemake-python-styleguide
|
fad6a1d2b66012d623fe0e0bba9b5561622deeb0
|
[
"MIT"
] | null | null | null |
tests/test_visitors/test_ast/test_keywords/test_base_exception.py
|
bekemaydin/wemake-python-styleguide
|
fad6a1d2b66012d623fe0e0bba9b5561622deeb0
|
[
"MIT"
] | null | null | null |
tests/test_visitors/test_ast/test_keywords/test_base_exception.py
|
bekemaydin/wemake-python-styleguide
|
fad6a1d2b66012d623fe0e0bba9b5561622deeb0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from wemake_python_styleguide.violations.best_practices import (
BaseExceptionViolation,
)
from wemake_python_styleguide.visitors.ast.keywords import (
WrongExceptionTypeVisitor,
)
use_base_exception = """
try:
execute()
except BaseException:
raise
"""
use_except_exception = """
try:
1 / 0
except Exception:
raise
"""
use_bare_except = """
try:
1 / 0
except:
raise
"""
@pytest.mark.parametrize('code', [
    use_base_exception,
])
def test_use_base_exception(
    assert_errors, parse_ast_tree, code, default_options,
):
    """Ensure that ``except BaseException:`` is reported as a violation."""
    parsed = parse_ast_tree(code)
    checker = WrongExceptionTypeVisitor(default_options, tree=parsed)
    checker.run()
    assert_errors(checker, [BaseExceptionViolation])
@pytest.mark.parametrize('code', [
    use_except_exception,
    use_bare_except,
])
def test_use_exception(
    assert_errors, parse_ast_tree, code, default_options,
):
    """Ensure that ``except Exception:`` and bare ``except:`` pass the check."""
    parsed = parse_ast_tree(code)
    checker = WrongExceptionTypeVisitor(default_options, tree=parsed)
    checker.run()
    assert_errors(checker, [])
| 19.47619
| 69
| 0.711491
|
import pytest
from wemake_python_styleguide.violations.best_practices import (
BaseExceptionViolation,
)
from wemake_python_styleguide.visitors.ast.keywords import (
WrongExceptionTypeVisitor,
)
use_base_exception = """
try:
execute()
except BaseException:
raise
"""
use_except_exception = """
try:
1 / 0
except Exception:
raise
"""
use_bare_except = """
try:
1 / 0
except:
raise
"""
@pytest.mark.parametrize('code', [
use_base_exception,
])
def test_use_base_exception(
assert_errors, parse_ast_tree, code, default_options,
):
tree = parse_ast_tree(code)
visitor = WrongExceptionTypeVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [BaseExceptionViolation])
@pytest.mark.parametrize('code', [
use_except_exception,
use_bare_except,
])
def test_use_exception(
assert_errors, parse_ast_tree, code, default_options,
):
tree = parse_ast_tree(code)
visitor = WrongExceptionTypeVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
| true
| true
|
f704280d77be35f7fcce108be106fa90c46c3518
| 26
|
py
|
Python
|
gnutools/utils/__init__.py
|
JeanMaximilienCadic/gnutools-python
|
c247788c988f4aa14904f63df71743b75adaa16d
|
[
"MIT"
] | null | null | null |
gnutools/utils/__init__.py
|
JeanMaximilienCadic/gnutools-python
|
c247788c988f4aa14904f63df71743b75adaa16d
|
[
"MIT"
] | null | null | null |
gnutools/utils/__init__.py
|
JeanMaximilienCadic/gnutools-python
|
c247788c988f4aa14904f63df71743b75adaa16d
|
[
"MIT"
] | null | null | null |
from .functional import *
| 13
| 25
| 0.769231
|
from .functional import *
| true
| true
|
f70428bf036f48285ac70f7871aab75dca937d2d
| 1,035
|
py
|
Python
|
pomfrlFOR/examples/battle_model/algo/__init__.py
|
Sriram94/pomfrl
|
c6728f8ef6bafb0cb9e0c5007734ccb51ca341af
|
[
"MIT"
] | 7
|
2021-03-24T06:14:57.000Z
|
2022-02-09T15:27:26.000Z
|
pomfrlFOR/examples/battle_model/algo/__init__.py
|
Sriram94/pomfrl
|
c6728f8ef6bafb0cb9e0c5007734ccb51ca341af
|
[
"MIT"
] | 1
|
2021-11-24T16:55:08.000Z
|
2021-11-26T16:14:38.000Z
|
pomfrlFOR/examples/battle_model/algo/__init__.py
|
Sriram94/pomfrl
|
c6728f8ef6bafb0cb9e0c5007734ccb51ca341af
|
[
"MIT"
] | null | null | null |
from . import ac
from . import q_learning
from . import rnnq_learning
AC = ac.ActorCritic
MFAC = ac.MFAC
IL = q_learning.DQN
MFQ = q_learning.MFQ
POMFQ = q_learning.POMFQ
rnnIL = rnnq_learning.DQN
rnnMFQ = rnnq_learning.MFQ
def spawn_ai(algo_name, sess, env, handle, human_name, max_steps):
    """Instantiate the RL model selected by ``algo_name``.

    Args:
        algo_name: one of 'mfq', 'mfac', 'ac', 'il', 'rnnIL', 'rnnMFQ',
            'pomfq' (case-sensitive; note the mixed-case RNN names).
        sess: TensorFlow session shared by the model.
        env: battle environment instance.
        handle: agent-group handle within ``env``.
        human_name: readable name used for variable scoping/checkpoints.
        max_steps: episode step limit (not passed to 'mfac'/'ac').

    Returns:
        The constructed model object.

    Raises:
        ValueError: if ``algo_name`` is not recognised.  Previously an
            unknown name fell through every branch and surfaced as a
            confusing ``NameError`` on ``return model``.
    """
    if algo_name == 'mfq':
        model = MFQ(sess, human_name, handle, env, max_steps, memory_size=80000)
    elif algo_name == 'mfac':
        model = MFAC(sess, human_name, handle, env)
    elif algo_name == 'ac':
        model = AC(sess, human_name, handle, env)
    elif algo_name == 'il':
        model = IL(sess, human_name, handle, env, max_steps, memory_size=80000)
    elif algo_name == 'rnnIL':
        model = rnnIL(sess, human_name, handle, env, max_steps, memory_size=80000)
    elif algo_name == 'rnnMFQ':
        model = rnnMFQ(sess, human_name, handle, env, max_steps, memory_size=80000)
    elif algo_name == 'pomfq':
        model = POMFQ(sess, human_name, handle, env, max_steps, memory_size=80000)
    else:
        raise ValueError('unknown algorithm name: {0!r}'.format(algo_name))
    return model
| 35.689655
| 83
| 0.677295
|
from . import ac
from . import q_learning
from . import rnnq_learning
AC = ac.ActorCritic
MFAC = ac.MFAC
IL = q_learning.DQN
MFQ = q_learning.MFQ
POMFQ = q_learning.POMFQ
rnnIL = rnnq_learning.DQN
rnnMFQ = rnnq_learning.MFQ
def spawn_ai(algo_name, sess, env, handle, human_name, max_steps):
if algo_name == 'mfq':
model = MFQ(sess, human_name, handle, env, max_steps, memory_size=80000)
elif algo_name == 'mfac':
model = MFAC(sess, human_name, handle, env)
elif algo_name == 'ac':
model = AC(sess, human_name, handle, env)
elif algo_name == 'il':
model = IL(sess, human_name, handle, env, max_steps, memory_size=80000)
elif algo_name == 'rnnIL':
model = rnnIL(sess, human_name, handle, env, max_steps, memory_size=80000)
elif algo_name == 'rnnMFQ':
model = rnnMFQ(sess, human_name, handle, env, max_steps, memory_size=80000)
elif algo_name == 'pomfq':
model = POMFQ(sess, human_name, handle, env, max_steps, memory_size=80000)
return model
| true
| true
|
f7042a28b38dfb50a0e690fac89b4181327b724e
| 4,329
|
py
|
Python
|
src/main.py
|
RIZY101/ctf-nc-framework
|
faf088169f58514f79c0088568019b3db5a9307b
|
[
"MIT"
] | null | null | null |
src/main.py
|
RIZY101/ctf-nc-framework
|
faf088169f58514f79c0088568019b3db5a9307b
|
[
"MIT"
] | null | null | null |
src/main.py
|
RIZY101/ctf-nc-framework
|
faf088169f58514f79c0088568019b3db5a9307b
|
[
"MIT"
] | null | null | null |
from lib.types import IStdin, IStdout
def main(stdin: IStdin, stdout: IStdout):
    """Run the 'hack PWN_University' CTF challenge over a netcat session.

    Walks the player through four gated prompts (username, password,
    filesystem path, SQL command).  The flag is printed only when all
    four answers match; any wrong answer prints a retry hint and the
    session ends (the caller is expected to reconnect the client).
    """
    # Intro/backstory banner.
    stdout.write('*** You are a student at PWN_University and you are all set to graduate at the end of the semester. Unfortunately the night before graduation you learned you were going to fail your last class and now you’re afraid the school wont let you graduate. Luckily you have a friend in IT and after hearing of your situation he casually sends you a message with the IP address for one of the schools secure servers. Your goal is to hack into the server and figure out a way to change your grade! ***\n')
    stdout.write('\n')
    stdout.write('You are requesting access to an offical PWN_University server. Only authorised individuals are allowed further.\n')
    stdout.write('\n')
    # Stage 1: username.  The printed hint 'MTMzN3VzZXI=' is the base64
    # encoding of the expected answer '1337user'.
    stdout.write('*** You remember one of your IT friends who works for the university keeps their username encoded on their desk incase they forget the spelling. So you go to their desk and find out its MTMzN3VzZXI= ***\n')
    stdout.write('\n')
    stdout.write('Enter your username: ')
    # Flush so the prompt reaches the client before we block on input.
    stdout.flush()
    username = stdin.readline().strip('\n')
    if username == '1337user':
        stdout.write('\n')
        # Stage 2: password.  The printed hash is presumably the MD5 of
        # 'p@ssword' — TODO confirm; the check below only compares the
        # plaintext answer.
        stdout.write('*** You then remember there was a data breach of all university passwords. Luckily PWN_University does not store their passwords in plain text, but rather in MD5 hashes. You navigate to the one associated with your friends username and it is 90f2c9c53f66540e67349e0ab83d8cd0 ***\n')
        stdout.write('\n')
        stdout.write('Now please enter your password: ')
        stdout.flush()
        password = stdin.readline().strip('\n')
        if password == 'p@ssword':
            stdout.write('Login Successful!\n')
            stdout.write('\n')
            # Stage 3: exact shell command for the default MySQL data dir.
            stdout.write('*** Now that you have logged into the server you remember your IT friend implying that the database of grades is a mysql databse. Maybe you should try changing directories to where that is commonly stored (please use the full path) ***\n')
            stdout.write('\n')
            stdout.write('~$ ')
            stdout.flush()
            path = stdin.readline().strip('\n')
            if path == 'cd /var/lib/mysql':
                stdout.write('\n')
                stdout.write('*** Wow it looks like your getting close you are now in the mysql directory. You run some SQL queries on the grades database and are able to select the string that says \'PWNER1337 has a F\'. All you have to do is replace F with an A (type in the SQL command to do this bellow) ***\n')
                stdout.write('\n')
                stdout.write('mysql> ')
                stdout.flush()
                sql = stdin.readline().strip('\n')
                # Stage 4: SQL.  Accepts any command containing the key
                # tokens instead of the one exact string kept below.
                #if sql == 'REPLACE(\'PWNER1337 has a F\', \'F\', \'A\');':
                if 'REPLACE' in sql and 'PWNER1337' in sql and 'F' in sql and 'A' in sql:
                    stdout.write('\n')
                    stdout.write('*** Congratulations you changed your grade from an F to an A. Unfortunatly the university caught you in the act, but because you were able to hack PWN_University they decided to let you graduate after all! ***\n')
                    stdout.write('\n')
                    # Winning path: reveal the flag.
                    stdout.write('*** Present this flag to the challenge oragnizer to claim your prize! flag{CI_NETSEC_1ST_COMP}\n')
                else :
                    stdout.write('\n')
                    stdout.write('*** Oh no looks like you entered the wrong SQL command maybe you should try reconnecting to the server and try another answer... ***\n')
            else :
                stdout.write('\n')
                stdout.write('*** Oh no looks like you entered the wrong path maybe you should try reconnecting to the server and try another answer... ***\n')
        else :
            stdout.write('\n')
            stdout.write('Thats not the correct password access denied!\n')
            stdout.write('*** Oh no looks like your access was denied maybe you should try reconnecting to the server and try another answer... ***\n')
    else :
        stdout.write('\n')
        stdout.write('Thats not a valid username access denied!\n')
        stdout.write('*** Oh no looks like your access was denied maybe you should try reconnecting to the server and try another answer... ***\n')
| 77.303571
| 513
| 0.639871
|
from lib.types import IStdin, IStdout
def main(stdin: IStdin, stdout: IStdout):
stdout.write('*** You are a student at PWN_University and you are all set to graduate at the end of the semester. Unfortunately the night before graduation you learned you were going to fail your last class and now you’re afraid the school wont let you graduate. Luckily you have a friend in IT and after hearing of your situation he casually sends you a message with the IP address for one of the schools secure servers. Your goal is to hack into the server and figure out a way to change your grade! ***\n')
stdout.write('\n')
stdout.write('You are requesting access to an offical PWN_University server. Only authorised individuals are allowed further.\n')
stdout.write('\n')
stdout.write('*** You remember one of your IT friends who works for the university keeps their username encoded on their desk incase they forget the spelling. So you go to their desk and find out its MTMzN3VzZXI= ***\n')
stdout.write('\n')
stdout.write('Enter your username: ')
stdout.flush()
username = stdin.readline().strip('\n')
if username == '1337user':
stdout.write('\n')
stdout.write('*** You then remember there was a data breach of all university passwords. Luckily PWN_University does not store their passwords in plain text, but rather in MD5 hashes. You navigate to the one associated with your friends username and it is 90f2c9c53f66540e67349e0ab83d8cd0 ***\n')
stdout.write('\n')
stdout.write('Now please enter your password: ')
stdout.flush()
password = stdin.readline().strip('\n')
if password == 'p@ssword':
stdout.write('Login Successful!\n')
stdout.write('\n')
stdout.write('*** Now that you have logged into the server you remember your IT friend implying that the database of grades is a mysql databse. Maybe you should try changing directories to where that is commonly stored (please use the full path) ***\n')
stdout.write('\n')
stdout.write('~$ ')
stdout.flush()
path = stdin.readline().strip('\n')
if path == 'cd /var/lib/mysql':
stdout.write('\n')
stdout.write('*** Wow it looks like your getting close you are now in the mysql directory. You run some SQL queries on the grades database and are able to select the string that says \'PWNER1337 has a F\'. All you have to do is replace F with an A (type in the SQL command to do this bellow) ***\n')
stdout.write('\n')
stdout.write('mysql> ')
stdout.flush()
sql = stdin.readline().strip('\n')
if 'REPLACE' in sql and 'PWNER1337' in sql and 'F' in sql and 'A' in sql:
stdout.write('\n')
stdout.write('*** Congratulations you changed your grade from an F to an A. Unfortunatly the university caught you in the act, but because you were able to hack PWN_University they decided to let you graduate after all! ***\n')
stdout.write('\n')
stdout.write('*** Present this flag to the challenge oragnizer to claim your prize! flag{CI_NETSEC_1ST_COMP}\n')
else :
stdout.write('\n')
stdout.write('*** Oh no looks like you entered the wrong SQL command maybe you should try reconnecting to the server and try another answer... ***\n')
else :
stdout.write('\n')
stdout.write('*** Oh no looks like you entered the wrong path maybe you should try reconnecting to the server and try another answer... ***\n')
else :
stdout.write('\n')
stdout.write('Thats not the correct password access denied!\n')
stdout.write('*** Oh no looks like your access was denied maybe you should try reconnecting to the server and try another answer... ***\n')
else :
stdout.write('\n')
stdout.write('Thats not a valid username access denied!\n')
stdout.write('*** Oh no looks like your access was denied maybe you should try reconnecting to the server and try another answer... ***\n')
| true
| true
|
f7042a5860ad67696f9ad5fbfa41b846180239c3
| 18,293
|
py
|
Python
|
homeassistant/components/isy994/const.py
|
Wohlraj/core
|
feed095e5bb4be0d31991530378fe48fcafbbf9c
|
[
"Apache-2.0"
] | 2
|
2021-09-13T21:44:02.000Z
|
2021-12-17T21:20:51.000Z
|
homeassistant/components/isy994/const.py
|
Wohlraj/core
|
feed095e5bb4be0d31991530378fe48fcafbbf9c
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:51:16.000Z
|
2022-03-12T00:43:18.000Z
|
homeassistant/components/isy994/const.py
|
klauern/home-assistant-core
|
c18ba6aec0627e6afb6442c678edb5ff2bb17db6
|
[
"Apache-2.0"
] | 2
|
2020-11-04T07:40:01.000Z
|
2021-09-13T21:44:03.000Z
|
"""Constants for the ISY994 Platform."""
import logging
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_COLD,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DEVICE_CLASS_VIBRATION,
DOMAIN as BINARY_SENSOR,
)
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
DOMAIN as CLIMATE,
FAN_AUTO,
FAN_HIGH,
FAN_MEDIUM,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
)
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from homeassistant.const import (
CONCENTRATION_PARTS_PER_MILLION,
DEGREE,
ENERGY_KILO_WATT_HOUR,
FREQUENCY_HERTZ,
LENGTH_CENTIMETERS,
LENGTH_FEET,
LENGTH_INCHES,
LENGTH_KILOMETERS,
LENGTH_METERS,
LENGTH_MILES,
MASS_KILOGRAMS,
MASS_POUNDS,
POWER_WATT,
PRESSURE_INHG,
SERVICE_LOCK,
SERVICE_UNLOCK,
SPEED_KILOMETERS_PER_HOUR,
SPEED_METERS_PER_SECOND,
SPEED_MILES_PER_HOUR,
STATE_CLOSED,
STATE_CLOSING,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_OPEN,
STATE_OPENING,
STATE_PROBLEM,
STATE_UNKNOWN,
STATE_UNLOCKED,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TEMP_KELVIN,
TIME_DAYS,
TIME_HOURS,
TIME_MILLISECONDS,
TIME_MINUTES,
TIME_MONTHS,
TIME_SECONDS,
TIME_YEARS,
UNIT_PERCENTAGE,
UV_INDEX,
VOLT,
VOLUME_GALLONS,
VOLUME_LITERS,
)
_LOGGER = logging.getLogger(__package__)
DOMAIN = "isy994"
MANUFACTURER = "Universal Devices, Inc"
CONF_IGNORE_STRING = "ignore_string"
CONF_SENSOR_STRING = "sensor_string"
CONF_VAR_SENSOR_STRING = "variable_sensor_string"
CONF_TLS_VER = "tls"
CONF_RESTORE_LIGHT_STATE = "restore_light_state"
DEFAULT_IGNORE_STRING = "{IGNORE ME}"
DEFAULT_SENSOR_STRING = "sensor"
DEFAULT_RESTORE_LIGHT_STATE = False
DEFAULT_TLS_VERSION = 1.1
DEFAULT_PROGRAM_STRING = "HA."
DEFAULT_VAR_SENSOR_STRING = "HA."
KEY_ACTIONS = "actions"
KEY_STATUS = "status"
SUPPORTED_PLATFORMS = [BINARY_SENSOR, SENSOR, LOCK, FAN, COVER, LIGHT, SWITCH, CLIMATE]
SUPPORTED_PROGRAM_PLATFORMS = [BINARY_SENSOR, LOCK, FAN, COVER, SWITCH]
SUPPORTED_BIN_SENS_CLASSES = ["moisture", "opening", "motion", "climate"]
# ISY Scenes are more like Switches than Home Assistant Scenes
# (they can turn off, and report their state)
ISY_GROUP_PLATFORM = SWITCH
ISY994_ISY = "isy"
ISY994_NODES = "isy994_nodes"
ISY994_PROGRAMS = "isy994_programs"
ISY994_VARIABLES = "isy994_variables"
FILTER_UOM = "uom"
FILTER_STATES = "states"
FILTER_NODE_DEF_ID = "node_def_id"
FILTER_INSTEON_TYPE = "insteon_type"
FILTER_ZWAVE_CAT = "zwave_cat"
# Special Subnodes for some Insteon Devices
SUBNODE_CLIMATE_COOL = 2
SUBNODE_CLIMATE_HEAT = 3
SUBNODE_DUSK_DAWN = 2
SUBNODE_EZIO2X4_SENSORS = [9, 10, 11, 12]
SUBNODE_FANLINC_LIGHT = 1
SUBNODE_HEARTBEAT = 4
SUBNODE_IOLINC_RELAY = 2
SUBNODE_LOW_BATTERY = 3
SUBNODE_MOTION_DISABLED = (13, 19) # Int->13 or Hex->0xD depending on firmware
SUBNODE_NEGATIVE = 2
SUBNODE_TAMPER = (10, 16) # Int->10 or Hex->0xA depending on firmware
# Generic Insteon Type Categories for Filters
TYPE_CATEGORY_CONTROLLERS = "0."
TYPE_CATEGORY_DIMMABLE = "1."
TYPE_CATEGORY_SWITCHED = "2."
TYPE_CATEGORY_IRRIGATION = "4."
TYPE_CATEGORY_CLIMATE = "5."
TYPE_CATEGORY_POOL_CTL = "6."
TYPE_CATEGORY_SENSOR_ACTUATORS = "7."
TYPE_CATEGORY_ENERGY_MGMT = "9."
TYPE_CATEGORY_COVER = "14."
TYPE_CATEGORY_LOCK = "15."
TYPE_CATEGORY_SAFETY = "16."
TYPE_CATEGORY_X10 = "113."
TYPE_EZIO2X4 = "7.3.255."
TYPE_INSTEON_MOTION = ("16.1.", "16.22.")
UNDO_UPDATE_LISTENER = "undo_update_listener"
# Used for discovery
UDN_UUID_PREFIX = "uuid:"
ISY_URL_POSTFIX = "/desc"
# Do not use the Home Assistant consts for the states here - we're matching exact API
# responses, not using them for Home Assistant states
# Insteon Types: https://www.universal-devices.com/developers/wsdk/5.0.4/1_fam.xml
# Z-Wave Categories: https://www.universal-devices.com/developers/wsdk/5.0.4/4_fam.xml
NODE_FILTERS = {
BINARY_SENSOR: {
FILTER_UOM: [],
FILTER_STATES: [],
FILTER_NODE_DEF_ID: [
"BinaryAlarm",
"BinaryAlarm_ADV",
"BinaryControl",
"BinaryControl_ADV",
"EZIO2x4_Input",
"EZRAIN_Input",
"OnOffControl",
"OnOffControl_ADV",
],
FILTER_INSTEON_TYPE: [
"7.0.",
"7.13.",
TYPE_CATEGORY_SAFETY,
], # Does a startswith() match; include the dot
FILTER_ZWAVE_CAT: (["104", "112", "138"] + list(map(str, range(148, 180)))),
},
SENSOR: {
# This is just a more-readable way of including MOST uoms between 1-100
# (Remember that range() is non-inclusive of the stop value)
FILTER_UOM: (
["1"]
+ list(map(str, range(3, 11)))
+ list(map(str, range(12, 51)))
+ list(map(str, range(52, 66)))
+ list(map(str, range(69, 78)))
+ ["79"]
+ list(map(str, range(82, 97)))
),
FILTER_STATES: [],
FILTER_NODE_DEF_ID: [
"IMETER_SOLO",
"EZIO2x4_Input_ADV",
"KeypadButton",
"KeypadButton_ADV",
"RemoteLinc2",
"RemoteLinc2_ADV",
],
FILTER_INSTEON_TYPE: ["0.16.", "0.17.", "0.18.", "9.0.", "9.7."],
FILTER_ZWAVE_CAT: (["118", "143"] + list(map(str, range(180, 185)))),
},
LOCK: {
FILTER_UOM: ["11"],
FILTER_STATES: ["locked", "unlocked"],
FILTER_NODE_DEF_ID: ["DoorLock"],
FILTER_INSTEON_TYPE: [TYPE_CATEGORY_LOCK, "4.64."],
FILTER_ZWAVE_CAT: ["111"],
},
FAN: {
FILTER_UOM: [],
FILTER_STATES: ["off", "low", "med", "high"],
FILTER_NODE_DEF_ID: ["FanLincMotor"],
FILTER_INSTEON_TYPE: ["1.46."],
FILTER_ZWAVE_CAT: [],
},
COVER: {
FILTER_UOM: ["97"],
FILTER_STATES: ["open", "closed", "closing", "opening", "stopped"],
FILTER_NODE_DEF_ID: [],
FILTER_INSTEON_TYPE: [],
FILTER_ZWAVE_CAT: [],
},
LIGHT: {
FILTER_UOM: ["51"],
FILTER_STATES: ["on", "off", "%"],
FILTER_NODE_DEF_ID: [
"BallastRelayLampSwitch",
"BallastRelayLampSwitch_ADV",
"DimmerLampOnly",
"DimmerLampSwitch",
"DimmerLampSwitch_ADV",
"DimmerSwitchOnly",
"DimmerSwitchOnly_ADV",
"KeypadDimmer",
"KeypadDimmer_ADV",
],
FILTER_INSTEON_TYPE: [TYPE_CATEGORY_DIMMABLE],
FILTER_ZWAVE_CAT: ["109", "119"],
},
SWITCH: {
FILTER_UOM: ["2", "78"],
FILTER_STATES: ["on", "off"],
FILTER_NODE_DEF_ID: [
"AlertModuleArmed",
"AlertModuleSiren",
"AlertModuleSiren_ADV",
"EZIO2x4_Output",
"EZRAIN_Output",
"KeypadRelay",
"KeypadRelay_ADV",
"RelayLampOnly",
"RelayLampOnly_ADV",
"RelayLampSwitch",
"RelayLampSwitch_ADV",
"RelaySwitchOnlyPlusQuery",
"RelaySwitchOnlyPlusQuery_ADV",
"Siren",
"Siren_ADV",
"X10",
],
FILTER_INSTEON_TYPE: [
TYPE_CATEGORY_SWITCHED,
"7.3.255.",
"9.10.",
"9.11.",
TYPE_CATEGORY_X10,
],
FILTER_ZWAVE_CAT: ["121", "122", "123", "137", "141", "147"],
},
CLIMATE: {
FILTER_UOM: ["2"],
FILTER_STATES: ["heating", "cooling", "idle", "fan_only", "off"],
FILTER_NODE_DEF_ID: ["TempLinc", "Thermostat"],
FILTER_INSTEON_TYPE: ["4.8", TYPE_CATEGORY_CLIMATE],
FILTER_ZWAVE_CAT: ["140"],
},
}
UOM_ISYV4_DEGREES = "degrees"
UOM_ISYV4_NONE = "n/a"
UOM_ISY_CELSIUS = 1
UOM_ISY_FAHRENHEIT = 2
UOM_DOUBLE_TEMP = "101"
UOM_HVAC_ACTIONS = "66"
UOM_HVAC_MODE_GENERIC = "67"
UOM_HVAC_MODE_INSTEON = "98"
UOM_FAN_MODES = "99"
UOM_INDEX = "25"
UOM_ON_OFF = "2"
UOM_FRIENDLY_NAME = {
"1": "A",
"3": f"btu/{TIME_HOURS}",
"4": TEMP_CELSIUS,
"5": LENGTH_CENTIMETERS,
"6": "ft³",
"7": f"ft³/{TIME_MINUTES}",
"8": "m³",
"9": TIME_DAYS,
"10": TIME_DAYS,
"12": "dB",
"13": "dB A",
"14": DEGREE,
"16": "macroseismic",
"17": TEMP_FAHRENHEIT,
"18": LENGTH_FEET,
"19": TIME_HOURS,
"20": TIME_HOURS,
"21": "%AH",
"22": "%RH",
"23": PRESSURE_INHG,
"24": f"{LENGTH_INCHES}/{TIME_HOURS}",
UOM_INDEX: "index", # Index type. Use "node.formatted" for value
"26": TEMP_KELVIN,
"27": "keyword",
"28": MASS_KILOGRAMS,
"29": "kV",
"30": "kW",
"31": "kPa",
"32": SPEED_KILOMETERS_PER_HOUR,
"33": ENERGY_KILO_WATT_HOUR,
"34": "liedu",
"35": VOLUME_LITERS,
"36": "lx",
"37": "mercalli",
"38": LENGTH_METERS,
"39": f"{LENGTH_METERS}³/{TIME_HOURS}",
"40": SPEED_METERS_PER_SECOND,
"41": "mA",
"42": TIME_MILLISECONDS,
"43": "mV",
"44": TIME_MINUTES,
"45": TIME_MINUTES,
"46": f"mm/{TIME_HOURS}",
"47": TIME_MONTHS,
"48": SPEED_MILES_PER_HOUR,
"49": SPEED_METERS_PER_SECOND,
"50": "Ω",
"51": UNIT_PERCENTAGE,
"52": MASS_POUNDS,
"53": "pf",
"54": CONCENTRATION_PARTS_PER_MILLION,
"55": "pulse count",
"57": TIME_SECONDS,
"58": TIME_SECONDS,
"59": "S/m",
"60": "m_b",
"61": "M_L",
"62": "M_w",
"63": "M_S",
"64": "shindo",
"65": "SML",
"69": VOLUME_GALLONS,
"71": UV_INDEX,
"72": VOLT,
"73": POWER_WATT,
"74": f"{POWER_WATT}/{LENGTH_METERS}²",
"75": "weekday",
"76": DEGREE,
"77": TIME_YEARS,
"82": "mm",
"83": LENGTH_KILOMETERS,
"85": "Ω",
"86": "kΩ",
"87": f"{LENGTH_METERS}³/{LENGTH_METERS}³",
"88": "Water activity",
"89": "RPM",
"90": FREQUENCY_HERTZ,
"91": DEGREE,
"92": f"{DEGREE} South",
"100": "", # Range 0-255, no unit.
UOM_DOUBLE_TEMP: UOM_DOUBLE_TEMP,
"102": "kWs",
"103": "$",
"104": "¢",
"105": LENGTH_INCHES,
"106": f"mm/{TIME_DAYS}",
"107": "", # raw 1-byte unsigned value
"108": "", # raw 2-byte unsigned value
"109": "", # raw 3-byte unsigned value
"110": "", # raw 4-byte unsigned value
"111": "", # raw 1-byte signed value
"112": "", # raw 2-byte signed value
"113": "", # raw 3-byte signed value
"114": "", # raw 4-byte signed value
"116": LENGTH_MILES,
"117": "mbar",
"118": "hPa",
"119": f"{POWER_WATT}{TIME_HOURS}",
"120": f"{LENGTH_INCHES}/{TIME_DAYS}",
}
UOM_TO_STATES = {
"11": { # Deadbolt Status
0: STATE_UNLOCKED,
100: STATE_LOCKED,
101: STATE_UNKNOWN,
102: STATE_PROBLEM,
},
"15": { # Door Lock Alarm
1: "master code changed",
2: "tamper code entry limit",
3: "escutcheon removed",
4: "key/manually locked",
5: "locked by touch",
6: "key/manually unlocked",
7: "remote locking jammed bolt",
8: "remotely locked",
9: "remotely unlocked",
10: "deadbolt jammed",
11: "battery too low to operate",
12: "critical low battery",
13: "low battery",
14: "automatically locked",
15: "automatic locking jammed bolt",
16: "remotely power cycled",
17: "lock handling complete",
19: "user deleted",
20: "user added",
21: "duplicate pin",
22: "jammed bolt by locking with keypad",
23: "locked by keypad",
24: "unlocked by keypad",
25: "keypad attempt outside schedule",
26: "hardware failure",
27: "factory reset",
},
UOM_HVAC_ACTIONS: { # Thermostat Heat/Cool State
0: CURRENT_HVAC_IDLE,
1: CURRENT_HVAC_HEAT,
2: CURRENT_HVAC_COOL,
3: CURRENT_HVAC_FAN,
4: CURRENT_HVAC_HEAT, # Pending Heat
5: CURRENT_HVAC_COOL, # Pending Cool
# >6 defined in ISY but not implemented, leaving for future expanision.
6: CURRENT_HVAC_IDLE,
7: CURRENT_HVAC_HEAT,
8: CURRENT_HVAC_HEAT,
9: CURRENT_HVAC_COOL,
10: CURRENT_HVAC_HEAT,
11: CURRENT_HVAC_HEAT,
},
UOM_HVAC_MODE_GENERIC: { # Thermostat Mode
0: HVAC_MODE_OFF,
1: HVAC_MODE_HEAT,
2: HVAC_MODE_COOL,
3: HVAC_MODE_AUTO,
4: PRESET_BOOST,
5: "resume",
6: HVAC_MODE_FAN_ONLY,
7: "furnace",
8: HVAC_MODE_DRY,
9: "moist air",
10: "auto changeover",
11: "energy save heat",
12: "energy save cool",
13: PRESET_AWAY,
14: HVAC_MODE_AUTO,
15: HVAC_MODE_AUTO,
16: HVAC_MODE_AUTO,
},
"68": { # Thermostat Fan Mode
0: FAN_AUTO,
1: FAN_ON,
2: FAN_HIGH, # Auto High
3: FAN_HIGH,
4: FAN_MEDIUM, # Auto Medium
5: FAN_MEDIUM,
6: "circulation",
7: "humidity circulation",
},
"78": {0: STATE_OFF, 100: STATE_ON}, # 0-Off 100-On
"79": {0: STATE_OPEN, 100: STATE_CLOSED}, # 0-Open 100-Close
"80": { # Thermostat Fan Run State
0: STATE_OFF,
1: STATE_ON,
2: "on high",
3: "on medium",
4: "circulation",
5: "humidity circulation",
6: "right/left circulation",
7: "up/down circulation",
8: "quiet circulation",
},
"84": {0: SERVICE_LOCK, 1: SERVICE_UNLOCK}, # Secure Mode
"93": { # Power Management Alarm
1: "power applied",
2: "ac mains disconnected",
3: "ac mains reconnected",
4: "surge detection",
5: "volt drop or drift",
6: "over current detected",
7: "over voltage detected",
8: "over load detected",
9: "load error",
10: "replace battery soon",
11: "replace battery now",
12: "battery is charging",
13: "battery is fully charged",
14: "charge battery soon",
15: "charge battery now",
},
"94": { # Appliance Alarm
1: "program started",
2: "program in progress",
3: "program completed",
4: "replace main filter",
5: "failure to set target temperature",
6: "supplying water",
7: "water supply failure",
8: "boiling",
9: "boiling failure",
10: "washing",
11: "washing failure",
12: "rinsing",
13: "rinsing failure",
14: "draining",
15: "draining failure",
16: "spinning",
17: "spinning failure",
18: "drying",
19: "drying failure",
20: "fan failure",
21: "compressor failure",
},
"95": { # Home Health Alarm
1: "leaving bed",
2: "sitting on bed",
3: "lying on bed",
4: "posture changed",
5: "sitting on edge of bed",
},
"96": { # VOC Level
1: "clean",
2: "slightly polluted",
3: "moderately polluted",
4: "highly polluted",
},
"97": { # Barrier Status
**{
0: STATE_CLOSED,
100: STATE_OPEN,
101: STATE_UNKNOWN,
102: "stopped",
103: STATE_CLOSING,
104: STATE_OPENING,
},
**{
b: f"{b} %" for a, b in enumerate(list(range(1, 100)))
}, # 1-99 are percentage open
},
UOM_HVAC_MODE_INSTEON: { # Insteon Thermostat Mode
0: HVAC_MODE_OFF,
1: HVAC_MODE_HEAT,
2: HVAC_MODE_COOL,
3: HVAC_MODE_HEAT_COOL,
4: HVAC_MODE_FAN_ONLY,
5: HVAC_MODE_AUTO, # Program Auto
6: HVAC_MODE_AUTO, # Program Heat-Set @ Local Device Only
7: HVAC_MODE_AUTO, # Program Cool-Set @ Local Device Only
},
UOM_FAN_MODES: {7: FAN_ON, 8: FAN_AUTO}, # Insteon Thermostat Fan Mode
"115": { # Most recent On style action taken for lamp control
0: "on",
1: "off",
2: "fade up",
3: "fade down",
4: "fade stop",
5: "fast on",
6: "fast off",
7: "triple press on",
8: "triple press off",
9: "4x press on",
10: "4x press off",
11: "5x press on",
12: "5x press off",
},
}
ISY_HVAC_MODES = [
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_AUTO,
HVAC_MODE_FAN_ONLY,
]
HA_HVAC_TO_ISY = {
HVAC_MODE_OFF: "off",
HVAC_MODE_HEAT: "heat",
HVAC_MODE_COOL: "cool",
HVAC_MODE_HEAT_COOL: "auto",
HVAC_MODE_FAN_ONLY: "fan_only",
HVAC_MODE_AUTO: "program_auto",
}
HA_FAN_TO_ISY = {FAN_ON: "on", FAN_AUTO: "auto"}
BINARY_SENSOR_DEVICE_TYPES_ISY = {
DEVICE_CLASS_MOISTURE: ["16.8.", "16.13.", "16.14."],
DEVICE_CLASS_OPENING: [
"16.9.",
"16.6.",
"16.7.",
"16.2.",
"16.17.",
"16.20.",
"16.21.",
],
DEVICE_CLASS_MOTION: ["16.1.", "16.4.", "16.5.", "16.3.", "16.22."],
}
BINARY_SENSOR_DEVICE_TYPES_ZWAVE = {
DEVICE_CLASS_SAFETY: ["137", "172", "176", "177", "178"],
DEVICE_CLASS_SMOKE: ["138", "156"],
DEVICE_CLASS_PROBLEM: ["148", "149", "157", "158", "164", "174", "175"],
DEVICE_CLASS_GAS: ["150", "151"],
DEVICE_CLASS_SOUND: ["153"],
DEVICE_CLASS_COLD: ["152", "168"],
DEVICE_CLASS_HEAT: ["154", "166", "167"],
DEVICE_CLASS_MOISTURE: ["159", "169"],
DEVICE_CLASS_DOOR: ["160"],
DEVICE_CLASS_BATTERY: ["162"],
DEVICE_CLASS_MOTION: ["155"],
DEVICE_CLASS_VIBRATION: ["173"],
}
| 28.229938
| 87
| 0.583939
|
import logging
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_COLD,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DEVICE_CLASS_VIBRATION,
DOMAIN as BINARY_SENSOR,
)
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
DOMAIN as CLIMATE,
FAN_AUTO,
FAN_HIGH,
FAN_MEDIUM,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
)
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from homeassistant.const import (
CONCENTRATION_PARTS_PER_MILLION,
DEGREE,
ENERGY_KILO_WATT_HOUR,
FREQUENCY_HERTZ,
LENGTH_CENTIMETERS,
LENGTH_FEET,
LENGTH_INCHES,
LENGTH_KILOMETERS,
LENGTH_METERS,
LENGTH_MILES,
MASS_KILOGRAMS,
MASS_POUNDS,
POWER_WATT,
PRESSURE_INHG,
SERVICE_LOCK,
SERVICE_UNLOCK,
SPEED_KILOMETERS_PER_HOUR,
SPEED_METERS_PER_SECOND,
SPEED_MILES_PER_HOUR,
STATE_CLOSED,
STATE_CLOSING,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_OPEN,
STATE_OPENING,
STATE_PROBLEM,
STATE_UNKNOWN,
STATE_UNLOCKED,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TEMP_KELVIN,
TIME_DAYS,
TIME_HOURS,
TIME_MILLISECONDS,
TIME_MINUTES,
TIME_MONTHS,
TIME_SECONDS,
TIME_YEARS,
UNIT_PERCENTAGE,
UV_INDEX,
VOLT,
VOLUME_GALLONS,
VOLUME_LITERS,
)
_LOGGER = logging.getLogger(__package__)
DOMAIN = "isy994"
MANUFACTURER = "Universal Devices, Inc"
CONF_IGNORE_STRING = "ignore_string"
CONF_SENSOR_STRING = "sensor_string"
CONF_VAR_SENSOR_STRING = "variable_sensor_string"
CONF_TLS_VER = "tls"
CONF_RESTORE_LIGHT_STATE = "restore_light_state"
DEFAULT_IGNORE_STRING = "{IGNORE ME}"
DEFAULT_SENSOR_STRING = "sensor"
DEFAULT_RESTORE_LIGHT_STATE = False
DEFAULT_TLS_VERSION = 1.1
DEFAULT_PROGRAM_STRING = "HA."
DEFAULT_VAR_SENSOR_STRING = "HA."
KEY_ACTIONS = "actions"
KEY_STATUS = "status"
SUPPORTED_PLATFORMS = [BINARY_SENSOR, SENSOR, LOCK, FAN, COVER, LIGHT, SWITCH, CLIMATE]
SUPPORTED_PROGRAM_PLATFORMS = [BINARY_SENSOR, LOCK, FAN, COVER, SWITCH]
SUPPORTED_BIN_SENS_CLASSES = ["moisture", "opening", "motion", "climate"]
ISY_GROUP_PLATFORM = SWITCH
ISY994_ISY = "isy"
ISY994_NODES = "isy994_nodes"
ISY994_PROGRAMS = "isy994_programs"
ISY994_VARIABLES = "isy994_variables"
FILTER_UOM = "uom"
FILTER_STATES = "states"
FILTER_NODE_DEF_ID = "node_def_id"
FILTER_INSTEON_TYPE = "insteon_type"
FILTER_ZWAVE_CAT = "zwave_cat"
SUBNODE_CLIMATE_COOL = 2
SUBNODE_CLIMATE_HEAT = 3
SUBNODE_DUSK_DAWN = 2
SUBNODE_EZIO2X4_SENSORS = [9, 10, 11, 12]
SUBNODE_FANLINC_LIGHT = 1
SUBNODE_HEARTBEAT = 4
SUBNODE_IOLINC_RELAY = 2
SUBNODE_LOW_BATTERY = 3
SUBNODE_MOTION_DISABLED = (13, 19)
SUBNODE_NEGATIVE = 2
SUBNODE_TAMPER = (10, 16)
TYPE_CATEGORY_CONTROLLERS = "0."
TYPE_CATEGORY_DIMMABLE = "1."
TYPE_CATEGORY_SWITCHED = "2."
TYPE_CATEGORY_IRRIGATION = "4."
TYPE_CATEGORY_CLIMATE = "5."
TYPE_CATEGORY_POOL_CTL = "6."
TYPE_CATEGORY_SENSOR_ACTUATORS = "7."
TYPE_CATEGORY_ENERGY_MGMT = "9."
TYPE_CATEGORY_COVER = "14."
TYPE_CATEGORY_LOCK = "15."
TYPE_CATEGORY_SAFETY = "16."
TYPE_CATEGORY_X10 = "113."
TYPE_EZIO2X4 = "7.3.255."
TYPE_INSTEON_MOTION = ("16.1.", "16.22.")
UNDO_UPDATE_LISTENER = "undo_update_listener"
UDN_UUID_PREFIX = "uuid:"
ISY_URL_POSTFIX = "/desc"
# responses, not using them for Home Assistant states
# Insteon Types: https://www.universal-devices.com/developers/wsdk/5.0.4/1_fam.xml
# Z-Wave Categories: https://www.universal-devices.com/developers/wsdk/5.0.4/4_fam.xml
NODE_FILTERS = {
BINARY_SENSOR: {
FILTER_UOM: [],
FILTER_STATES: [],
FILTER_NODE_DEF_ID: [
"BinaryAlarm",
"BinaryAlarm_ADV",
"BinaryControl",
"BinaryControl_ADV",
"EZIO2x4_Input",
"EZRAIN_Input",
"OnOffControl",
"OnOffControl_ADV",
],
FILTER_INSTEON_TYPE: [
"7.0.",
"7.13.",
TYPE_CATEGORY_SAFETY,
], # Does a startswith() match; include the dot
FILTER_ZWAVE_CAT: (["104", "112", "138"] + list(map(str, range(148, 180)))),
},
SENSOR: {
# This is just a more-readable way of including MOST uoms between 1-100
# (Remember that range() is non-inclusive of the stop value)
FILTER_UOM: (
["1"]
+ list(map(str, range(3, 11)))
+ list(map(str, range(12, 51)))
+ list(map(str, range(52, 66)))
+ list(map(str, range(69, 78)))
+ ["79"]
+ list(map(str, range(82, 97)))
),
FILTER_STATES: [],
FILTER_NODE_DEF_ID: [
"IMETER_SOLO",
"EZIO2x4_Input_ADV",
"KeypadButton",
"KeypadButton_ADV",
"RemoteLinc2",
"RemoteLinc2_ADV",
],
FILTER_INSTEON_TYPE: ["0.16.", "0.17.", "0.18.", "9.0.", "9.7."],
FILTER_ZWAVE_CAT: (["118", "143"] + list(map(str, range(180, 185)))),
},
LOCK: {
FILTER_UOM: ["11"],
FILTER_STATES: ["locked", "unlocked"],
FILTER_NODE_DEF_ID: ["DoorLock"],
FILTER_INSTEON_TYPE: [TYPE_CATEGORY_LOCK, "4.64."],
FILTER_ZWAVE_CAT: ["111"],
},
FAN: {
FILTER_UOM: [],
FILTER_STATES: ["off", "low", "med", "high"],
FILTER_NODE_DEF_ID: ["FanLincMotor"],
FILTER_INSTEON_TYPE: ["1.46."],
FILTER_ZWAVE_CAT: [],
},
COVER: {
FILTER_UOM: ["97"],
FILTER_STATES: ["open", "closed", "closing", "opening", "stopped"],
FILTER_NODE_DEF_ID: [],
FILTER_INSTEON_TYPE: [],
FILTER_ZWAVE_CAT: [],
},
LIGHT: {
FILTER_UOM: ["51"],
FILTER_STATES: ["on", "off", "%"],
FILTER_NODE_DEF_ID: [
"BallastRelayLampSwitch",
"BallastRelayLampSwitch_ADV",
"DimmerLampOnly",
"DimmerLampSwitch",
"DimmerLampSwitch_ADV",
"DimmerSwitchOnly",
"DimmerSwitchOnly_ADV",
"KeypadDimmer",
"KeypadDimmer_ADV",
],
FILTER_INSTEON_TYPE: [TYPE_CATEGORY_DIMMABLE],
FILTER_ZWAVE_CAT: ["109", "119"],
},
SWITCH: {
FILTER_UOM: ["2", "78"],
FILTER_STATES: ["on", "off"],
FILTER_NODE_DEF_ID: [
"AlertModuleArmed",
"AlertModuleSiren",
"AlertModuleSiren_ADV",
"EZIO2x4_Output",
"EZRAIN_Output",
"KeypadRelay",
"KeypadRelay_ADV",
"RelayLampOnly",
"RelayLampOnly_ADV",
"RelayLampSwitch",
"RelayLampSwitch_ADV",
"RelaySwitchOnlyPlusQuery",
"RelaySwitchOnlyPlusQuery_ADV",
"Siren",
"Siren_ADV",
"X10",
],
FILTER_INSTEON_TYPE: [
TYPE_CATEGORY_SWITCHED,
"7.3.255.",
"9.10.",
"9.11.",
TYPE_CATEGORY_X10,
],
FILTER_ZWAVE_CAT: ["121", "122", "123", "137", "141", "147"],
},
CLIMATE: {
FILTER_UOM: ["2"],
FILTER_STATES: ["heating", "cooling", "idle", "fan_only", "off"],
FILTER_NODE_DEF_ID: ["TempLinc", "Thermostat"],
FILTER_INSTEON_TYPE: ["4.8", TYPE_CATEGORY_CLIMATE],
FILTER_ZWAVE_CAT: ["140"],
},
}
UOM_ISYV4_DEGREES = "degrees"
UOM_ISYV4_NONE = "n/a"
UOM_ISY_CELSIUS = 1
UOM_ISY_FAHRENHEIT = 2
UOM_DOUBLE_TEMP = "101"
UOM_HVAC_ACTIONS = "66"
UOM_HVAC_MODE_GENERIC = "67"
UOM_HVAC_MODE_INSTEON = "98"
UOM_FAN_MODES = "99"
UOM_INDEX = "25"
UOM_ON_OFF = "2"
UOM_FRIENDLY_NAME = {
"1": "A",
"3": f"btu/{TIME_HOURS}",
"4": TEMP_CELSIUS,
"5": LENGTH_CENTIMETERS,
"6": "ft³",
"7": f"ft³/{TIME_MINUTES}",
"8": "m³",
"9": TIME_DAYS,
"10": TIME_DAYS,
"12": "dB",
"13": "dB A",
"14": DEGREE,
"16": "macroseismic",
"17": TEMP_FAHRENHEIT,
"18": LENGTH_FEET,
"19": TIME_HOURS,
"20": TIME_HOURS,
"21": "%AH",
"22": "%RH",
"23": PRESSURE_INHG,
"24": f"{LENGTH_INCHES}/{TIME_HOURS}",
UOM_INDEX: "index", # Index type. Use "node.formatted" for value
"26": TEMP_KELVIN,
"27": "keyword",
"28": MASS_KILOGRAMS,
"29": "kV",
"30": "kW",
"31": "kPa",
"32": SPEED_KILOMETERS_PER_HOUR,
"33": ENERGY_KILO_WATT_HOUR,
"34": "liedu",
"35": VOLUME_LITERS,
"36": "lx",
"37": "mercalli",
"38": LENGTH_METERS,
"39": f"{LENGTH_METERS}³/{TIME_HOURS}",
"40": SPEED_METERS_PER_SECOND,
"41": "mA",
"42": TIME_MILLISECONDS,
"43": "mV",
"44": TIME_MINUTES,
"45": TIME_MINUTES,
"46": f"mm/{TIME_HOURS}",
"47": TIME_MONTHS,
"48": SPEED_MILES_PER_HOUR,
"49": SPEED_METERS_PER_SECOND,
"50": "Ω",
"51": UNIT_PERCENTAGE,
"52": MASS_POUNDS,
"53": "pf",
"54": CONCENTRATION_PARTS_PER_MILLION,
"55": "pulse count",
"57": TIME_SECONDS,
"58": TIME_SECONDS,
"59": "S/m",
"60": "m_b",
"61": "M_L",
"62": "M_w",
"63": "M_S",
"64": "shindo",
"65": "SML",
"69": VOLUME_GALLONS,
"71": UV_INDEX,
"72": VOLT,
"73": POWER_WATT,
"74": f"{POWER_WATT}/{LENGTH_METERS}²",
"75": "weekday",
"76": DEGREE,
"77": TIME_YEARS,
"82": "mm",
"83": LENGTH_KILOMETERS,
"85": "Ω",
"86": "kΩ",
"87": f"{LENGTH_METERS}³/{LENGTH_METERS}³",
"88": "Water activity",
"89": "RPM",
"90": FREQUENCY_HERTZ,
"91": DEGREE,
"92": f"{DEGREE} South",
"100": "", # Range 0-255, no unit.
UOM_DOUBLE_TEMP: UOM_DOUBLE_TEMP,
"102": "kWs",
"103": "$",
"104": "¢",
"105": LENGTH_INCHES,
"106": f"mm/{TIME_DAYS}",
"107": "", # raw 1-byte unsigned value
"108": "", # raw 2-byte unsigned value
"109": "", # raw 3-byte unsigned value
"110": "", # raw 4-byte unsigned value
"111": "", # raw 1-byte signed value
"112": "", # raw 2-byte signed value
"113": "", # raw 3-byte signed value
"114": "", # raw 4-byte signed value
"116": LENGTH_MILES,
"117": "mbar",
"118": "hPa",
"119": f"{POWER_WATT}{TIME_HOURS}",
"120": f"{LENGTH_INCHES}/{TIME_DAYS}",
}
UOM_TO_STATES = {
"11": { # Deadbolt Status
0: STATE_UNLOCKED,
100: STATE_LOCKED,
101: STATE_UNKNOWN,
102: STATE_PROBLEM,
},
"15": { # Door Lock Alarm
1: "master code changed",
2: "tamper code entry limit",
3: "escutcheon removed",
4: "key/manually locked",
5: "locked by touch",
6: "key/manually unlocked",
7: "remote locking jammed bolt",
8: "remotely locked",
9: "remotely unlocked",
10: "deadbolt jammed",
11: "battery too low to operate",
12: "critical low battery",
13: "low battery",
14: "automatically locked",
15: "automatic locking jammed bolt",
16: "remotely power cycled",
17: "lock handling complete",
19: "user deleted",
20: "user added",
21: "duplicate pin",
22: "jammed bolt by locking with keypad",
23: "locked by keypad",
24: "unlocked by keypad",
25: "keypad attempt outside schedule",
26: "hardware failure",
27: "factory reset",
},
UOM_HVAC_ACTIONS: { # Thermostat Heat/Cool State
0: CURRENT_HVAC_IDLE,
1: CURRENT_HVAC_HEAT,
2: CURRENT_HVAC_COOL,
3: CURRENT_HVAC_FAN,
4: CURRENT_HVAC_HEAT, # Pending Heat
5: CURRENT_HVAC_COOL, # Pending Cool
# >6 defined in ISY but not implemented, leaving for future expanision.
6: CURRENT_HVAC_IDLE,
7: CURRENT_HVAC_HEAT,
8: CURRENT_HVAC_HEAT,
9: CURRENT_HVAC_COOL,
10: CURRENT_HVAC_HEAT,
11: CURRENT_HVAC_HEAT,
},
UOM_HVAC_MODE_GENERIC: { # Thermostat Mode
0: HVAC_MODE_OFF,
1: HVAC_MODE_HEAT,
2: HVAC_MODE_COOL,
3: HVAC_MODE_AUTO,
4: PRESET_BOOST,
5: "resume",
6: HVAC_MODE_FAN_ONLY,
7: "furnace",
8: HVAC_MODE_DRY,
9: "moist air",
10: "auto changeover",
11: "energy save heat",
12: "energy save cool",
13: PRESET_AWAY,
14: HVAC_MODE_AUTO,
15: HVAC_MODE_AUTO,
16: HVAC_MODE_AUTO,
},
"68": { # Thermostat Fan Mode
0: FAN_AUTO,
1: FAN_ON,
2: FAN_HIGH, # Auto High
3: FAN_HIGH,
4: FAN_MEDIUM, # Auto Medium
5: FAN_MEDIUM,
6: "circulation",
7: "humidity circulation",
},
"78": {0: STATE_OFF, 100: STATE_ON}, # 0-Off 100-On
"79": {0: STATE_OPEN, 100: STATE_CLOSED}, # 0-Open 100-Close
"80": { # Thermostat Fan Run State
0: STATE_OFF,
1: STATE_ON,
2: "on high",
3: "on medium",
4: "circulation",
5: "humidity circulation",
6: "right/left circulation",
7: "up/down circulation",
8: "quiet circulation",
},
"84": {0: SERVICE_LOCK, 1: SERVICE_UNLOCK}, # Secure Mode
"93": { # Power Management Alarm
1: "power applied",
2: "ac mains disconnected",
3: "ac mains reconnected",
4: "surge detection",
5: "volt drop or drift",
6: "over current detected",
7: "over voltage detected",
8: "over load detected",
9: "load error",
10: "replace battery soon",
11: "replace battery now",
12: "battery is charging",
13: "battery is fully charged",
14: "charge battery soon",
15: "charge battery now",
},
"94": { # Appliance Alarm
1: "program started",
2: "program in progress",
3: "program completed",
4: "replace main filter",
5: "failure to set target temperature",
6: "supplying water",
7: "water supply failure",
8: "boiling",
9: "boiling failure",
10: "washing",
11: "washing failure",
12: "rinsing",
13: "rinsing failure",
14: "draining",
15: "draining failure",
16: "spinning",
17: "spinning failure",
18: "drying",
19: "drying failure",
20: "fan failure",
21: "compressor failure",
},
"95": { # Home Health Alarm
1: "leaving bed",
2: "sitting on bed",
3: "lying on bed",
4: "posture changed",
5: "sitting on edge of bed",
},
"96": { # VOC Level
1: "clean",
2: "slightly polluted",
3: "moderately polluted",
4: "highly polluted",
},
"97": { # Barrier Status
**{
0: STATE_CLOSED,
100: STATE_OPEN,
101: STATE_UNKNOWN,
102: "stopped",
103: STATE_CLOSING,
104: STATE_OPENING,
},
**{
b: f"{b} %" for a, b in enumerate(list(range(1, 100)))
}, # 1-99 are percentage open
},
UOM_HVAC_MODE_INSTEON: { # Insteon Thermostat Mode
0: HVAC_MODE_OFF,
1: HVAC_MODE_HEAT,
2: HVAC_MODE_COOL,
3: HVAC_MODE_HEAT_COOL,
4: HVAC_MODE_FAN_ONLY,
5: HVAC_MODE_AUTO, # Program Auto
6: HVAC_MODE_AUTO, # Program Heat-Set @ Local Device Only
7: HVAC_MODE_AUTO, # Program Cool-Set @ Local Device Only
},
UOM_FAN_MODES: {7: FAN_ON, 8: FAN_AUTO}, # Insteon Thermostat Fan Mode
"115": { # Most recent On style action taken for lamp control
0: "on",
1: "off",
2: "fade up",
3: "fade down",
4: "fade stop",
5: "fast on",
6: "fast off",
7: "triple press on",
8: "triple press off",
9: "4x press on",
10: "4x press off",
11: "5x press on",
12: "5x press off",
},
}
ISY_HVAC_MODES = [
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_AUTO,
HVAC_MODE_FAN_ONLY,
]
HA_HVAC_TO_ISY = {
HVAC_MODE_OFF: "off",
HVAC_MODE_HEAT: "heat",
HVAC_MODE_COOL: "cool",
HVAC_MODE_HEAT_COOL: "auto",
HVAC_MODE_FAN_ONLY: "fan_only",
HVAC_MODE_AUTO: "program_auto",
}
HA_FAN_TO_ISY = {FAN_ON: "on", FAN_AUTO: "auto"}
BINARY_SENSOR_DEVICE_TYPES_ISY = {
DEVICE_CLASS_MOISTURE: ["16.8.", "16.13.", "16.14."],
DEVICE_CLASS_OPENING: [
"16.9.",
"16.6.",
"16.7.",
"16.2.",
"16.17.",
"16.20.",
"16.21.",
],
DEVICE_CLASS_MOTION: ["16.1.", "16.4.", "16.5.", "16.3.", "16.22."],
}
BINARY_SENSOR_DEVICE_TYPES_ZWAVE = {
DEVICE_CLASS_SAFETY: ["137", "172", "176", "177", "178"],
DEVICE_CLASS_SMOKE: ["138", "156"],
DEVICE_CLASS_PROBLEM: ["148", "149", "157", "158", "164", "174", "175"],
DEVICE_CLASS_GAS: ["150", "151"],
DEVICE_CLASS_SOUND: ["153"],
DEVICE_CLASS_COLD: ["152", "168"],
DEVICE_CLASS_HEAT: ["154", "166", "167"],
DEVICE_CLASS_MOISTURE: ["159", "169"],
DEVICE_CLASS_DOOR: ["160"],
DEVICE_CLASS_BATTERY: ["162"],
DEVICE_CLASS_MOTION: ["155"],
DEVICE_CLASS_VIBRATION: ["173"],
}
| true
| true
|
f7042aae1c5991d42b4fceb79304a0ff5d0e7579
| 398
|
py
|
Python
|
scfmsp/controlflowanalysis/instructions/InstructionJz.py
|
sepidehpouyan/SCF-MSP430
|
1d7565bf38d9f42e775031d4ea8515ff99bef778
|
[
"MIT"
] | 1
|
2020-07-03T21:26:52.000Z
|
2020-07-03T21:26:52.000Z
|
scfmsp/controlflowanalysis/instructions/InstructionJz.py
|
sepidehpouyan/SCF-MSP430
|
1d7565bf38d9f42e775031d4ea8515ff99bef778
|
[
"MIT"
] | null | null | null |
scfmsp/controlflowanalysis/instructions/InstructionJz.py
|
sepidehpouyan/SCF-MSP430
|
1d7565bf38d9f42e775031d4ea8515ff99bef778
|
[
"MIT"
] | null | null | null |
from scfmsp.controlflowanalysis.StatusRegister import StatusRegister
from scfmsp.controlflowanalysis.instructions.AbstractInstructionBranching import AbstractInstructionBranching
class InstructionJz(AbstractInstructionBranching):
    """MSP430 'jz' (jump-if-zero) conditional branch instruction.

    The branch is taken or not depending on the Zero flag of the
    abstract status register, as exposed by
    :meth:`get_branching_condition_domain`.
    """

    # Mnemonic used to match this instruction class during decoding.
    name = 'jz'

    def get_execution_time(self):
        """Return the execution time of 'jz' in cycles (constant 2)."""
        return 2

    def get_branching_condition_domain(self, ac):
        """Return the abstract-domain value of the Zero flag.

        ``ac`` is the analysis context; ``ac.sra`` maps status-register
        flags to their abstract values.
        """
        return ac.sra.get(StatusRegister.ZERO)
| 30.615385
| 109
| 0.806533
|
from scfmsp.controlflowanalysis.StatusRegister import StatusRegister
from scfmsp.controlflowanalysis.instructions.AbstractInstructionBranching import AbstractInstructionBranching
class InstructionJz(AbstractInstructionBranching):
name = 'jz'
def get_execution_time(self):
return 2
def get_branching_condition_domain(self, ac):
return ac.sra.get(StatusRegister.ZERO)
| true
| true
|
f7042b01cfb71a99764931d3f29e9d6ab437938d
| 2,363
|
py
|
Python
|
data_preprocessing/tweet_api.py
|
teomores/kafka-twitter
|
778539c8f2d705c3fc75dfc8e00f9b81750b6d05
|
[
"Apache-2.0"
] | 4
|
2019-09-22T22:03:41.000Z
|
2021-03-17T22:36:25.000Z
|
data_preprocessing/tweet_api.py
|
tmscarla/kafka-twitter
|
29d7c48fd1d225e33ec06be9bfed1826fa4d6b60
|
[
"Apache-2.0"
] | 8
|
2020-03-24T17:31:21.000Z
|
2022-03-11T23:59:52.000Z
|
data_preprocessing/tweet_api.py
|
tmscarla/kafka-twitter
|
29d7c48fd1d225e33ec06be9bfed1826fa4d6b60
|
[
"Apache-2.0"
] | null | null | null |
# Import the Twython class
from twython import Twython
import json
import os
import pandas as pd
from tqdm import tqdm
try:
os.remove('twitter_dataset.csv')
except OSError:
pass
def main():
    """Fetch recent English tweets for a list of query words and append
    them to the dataset at ``data/twitter_dataset_2.csv``.

    Reads the previously collected dataset, loads one query word per line
    from ``improved_dict.txt``, searches Twitter (100 tweets per word,
    first 50 words only — the per-request maximum allowed by the API),
    extracts username, text, hashtags and mentions from each status, and
    writes the merged result back to the same CSV file.
    """
    old_df = pd.read_csv('data/twitter_dataset_2.csv', lineterminator='\n')

    # Load the dictionary with the top used English words (one per line).
    with open('improved_dict.txt') as d:
        words = d.read().split('\n')

    # Fields we are interested in acquiring from each tweet.
    dict_ = {'user': [],
             'text': [],
             'hashtags': [],
             'mentions': []
             }

    # SECURITY NOTE(review): API credentials are hard-coded in the source.
    # They should be loaded from environment variables or a config file
    # kept out of version control, and these keys should be rotated.
    python_tweets = Twython('9Tz9FnZ1PR9AcEvudwC7hqOod',  # API Key
                            'Z7upFmGJZE3oAfcb2ZUmRdEeBJJkkYTQ86PuB3iKgWqXFdMFNo')  # API Secret

    # One search query per target word; only recent English tweets.
    queries = [{'q': w,
                'result_type': 'recent',
                'count': 100,   # 100 tweets, Twitter's maximum per request
                'lang': 'en',
                }
               for w in words]

    # Perform the queries and map the JSON responses into our dictionary.
    for q in tqdm(queries[:50]):
        for status in python_tweets.search(**q)['statuses']:
            dict_['user'].append(status['user']['screen_name'])
            dict_['text'].append(status['text'])
            # Hashtags may be missing or repeated, so collect them as a list.
            ht = [d['text'] for d in status['entities']['hashtags'] if 'text' in d]
            dict_['hashtags'].append(ht)
            # Same thing for the mentions.
            ment = [d['screen_name'] for d in status['entities']['user_mentions']
                    if 'screen_name' in d]
            dict_['mentions'].append(ment)

    # Structure data in a DataFrame for easier manipulation. pd.concat
    # replaces the deprecated DataFrame.append (removed in pandas 2.0)
    # with the same row order: new tweets first, then the old dataset.
    df = pd.concat([pd.DataFrame(dict_), old_df], ignore_index=True)
    df.to_csv('data/twitter_dataset_2.csv', index=False, encoding='utf-8')
if __name__ == '__main__':
    # The polling loop lives under the __main__ guard: the original code
    # ran `while True: sleep(1200); main()` unconditionally at module
    # level, so merely importing this module blocked forever.
    from time import sleep

    # Run once immediately, then refresh the dataset every 20 minutes.
    main()
    while True:
        sleep(1200)  # 20 minutes between Twitter API sweeps
        main()
| 32.819444
| 122
| 0.61532
|
from twython import Twython
import json
import os
import pandas as pd
from tqdm import tqdm
try:
os.remove('twitter_dataset.csv')
except OSError:
pass
def main():
old_df = pd.read_csv('data/twitter_dataset_2.csv', lineterminator='\n')
with open('improved_dict.txt') as d:
word_list = d.read()
words = word_list.split('\n')
dict_ = {'user': [],
'text': [],
'hashtags': [],
'mentions': []
}
python_tweets = Twython('9Tz9FnZ1PR9AcEvudwC7hqOod', 'Z7upFmGJZE3oAfcb2ZUmRdEeBJJkkYTQ86PuB3iKgWqXFdMFNo')
queries = []
for w in words:
query = {'q': w, 'result_type': 'recent',
'count': 100, 'lang': 'en', }
queries.append(query)
for q in tqdm(queries[:50]):
for status in python_tweets.search(**q)['statuses']:
dict_['user'].append(status['user']['screen_name'])
dict_['text'].append(status['text'])
ht = [d['text'] for d in status['entities']['hashtags'] if 'text' in d]
dict_['hashtags'].append(ht)
ment = [d['screen_name'] for d in status['entities']['user_mentions'] if 'screen_name' in d]
dict_['mentions'].append(ment)
df = pd.DataFrame(dict_)
df = df.append(old_df)
df.to_csv('data/twitter_dataset_2.csv', index=False, encoding='utf-8')
if __name__ == '__main__':
main()
from time import sleep
while True:
sleep(1200)
main()
| true
| true
|
f7042c05b0bdadade8cd2ea76a032a0075ad7e9d
| 4,867
|
py
|
Python
|
pygmt/src/grdfilter.py
|
GenericMappingTools/gmt-python
|
c9c44854f0968dead5c8c8b5eaa0cb0b04907aa1
|
[
"BSD-3-Clause"
] | 168
|
2017-03-27T01:13:57.000Z
|
2019-01-19T02:37:36.000Z
|
pygmt/src/grdfilter.py
|
GenericMappingTools/gmt-python
|
c9c44854f0968dead5c8c8b5eaa0cb0b04907aa1
|
[
"BSD-3-Clause"
] | 167
|
2017-07-01T02:26:19.000Z
|
2019-01-22T18:39:13.000Z
|
pygmt/src/grdfilter.py
|
GenericMappingTools/gmt-python
|
c9c44854f0968dead5c8c8b5eaa0cb0b04907aa1
|
[
"BSD-3-Clause"
] | 51
|
2017-06-08T17:39:09.000Z
|
2019-01-16T17:33:11.000Z
|
"""
grdfilter - Filter a grid in the space (or time) domain.
"""
from pygmt.clib import Session
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
from pygmt.io import load_dataarray
@fmt_docstring
@use_alias(
    D="distance",
    F="filter",
    G="outgrid",
    I="spacing",
    N="nans",
    R="region",
    T="toggle",
    V="verbose",
    f="coltypes",
    r="registration",
)
@kwargs_to_strings(I="sequence", R="sequence")
def grdfilter(grid, **kwargs):
    r"""
    Filter a grid in the space (or time) domain.

    Filter a grid file in the time domain using one of the selected convolution
    or non-convolution isotropic or rectangular filters and compute distances
    using Cartesian or Spherical geometries. The output grid file can
    optionally be generated as a sub-region of the input (via ``region``)
    and/or with new increment (via ``spacing``) or registration
    (via ``toggle``). In this way, one may have "extra space" in the input
    data so that the edges will not be used and the output can be within one
    half-width of the input edges. If the filter is low-pass, then the output
    may be less frequently sampled than the input.

    Full option list at :gmt-docs:`grdfilter.html`

    {aliases}

    Parameters
    ----------
    grid : str or xarray.DataArray
        The file name of the input grid or the grid loaded as a DataArray.
    outgrid : str or None
        The name of the output netCDF file with extension .nc to store the grid
        in.
    filter : str
        **b**\|\ **c**\|\ **g**\|\ **o**\|\ **m**\|\ **p**\|\ **h**\ *xwidth*\
        [/*width2*\][*modifiers*].
        Name of filter type you which to apply, followed by the width:

        b: Box Car

        c: Cosine Arch

        g: Gaussian

        o: Operator

        m: Median

        p: Maximum Likelihood probability

        h: histogram
    distance : str
        Distance *flag* tells how grid (x,y) relates to filter width as
        follows:

        p: grid (px,py) with *width* an odd number of pixels; Cartesian
        distances.

        0: grid (x,y) same units as *width*, Cartesian distances.

        1: grid (x,y) in degrees, *width* in kilometers, Cartesian distances.

        2: grid (x,y) in degrees, *width* in km, dx scaled by cos(middle y),
        Cartesian distances.

        The above options are fastest because they allow weight matrix to be
        computed only once. The next three options are slower because they
        recompute weights for each latitude.

        3: grid (x,y) in degrees, *width* in km, dx scaled by cosine(y),
        Cartesian distance calculation.

        4: grid (x,y) in degrees, *width* in km, Spherical distance
        calculation.

        5: grid (x,y) in Mercator ``projection='m1'`` img units, *width* in km,
        Spherical distance calculation.

    {I}
    nans : str or float
        **i**\|\ **p**\|\ **r**.
        Determine how NaN-values in the input grid affects the filtered output.
    {R}
    toggle : bool
        Toggle the node registration for the output grid so as to become the
        opposite of the input grid. [Default gives the same registration as the
        input grid].
    {V}
    {f}
    {r}

    Returns
    -------
    ret: xarray.DataArray or None
        Return type depends on whether the ``outgrid`` parameter is set:

        - :class:`xarray.DataArray` if ``outgrid`` is not set
        - None if ``outgrid`` is set (grid output will be stored in file set by
          ``outgrid``)

    Example
    -------
    >>> import os
    >>> import pygmt
    >>> # Apply a filter of 600km (full width) to the @earth_relief_30m file
    >>> # and return a filtered field (saved as netcdf)
    >>> pygmt.grdfilter(
    ...     grid="@earth_relief_30m",
    ...     filter="m600",
    ...     distance="4",
    ...     region=[150, 250, 10, 40],
    ...     spacing=0.5,
    ...     outgrid="filtered_pacific.nc",
    ... )
    >>> os.remove("filtered_pacific.nc")  # cleanup file
    >>> # Apply a gaussian smoothing filter of 600 km in the input data array,
    >>> # and returns a filtered data array with the smoothed field.
    >>> grid = pygmt.datasets.load_earth_relief()
    >>> smooth_field = pygmt.grdfilter(grid=grid, filter="g600", distance="4")
    """
    with GMTTempFile(suffix=".nc") as tmpfile:
        with Session() as lib:
            # Accept either a file name or an in-memory grid as input;
            # the virtual file exposes both to GMT the same way.
            file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
            with file_context as infile:
                if (outgrid := kwargs.get("G")) is None:
                    kwargs["G"] = outgrid = tmpfile.name  # output to tmpfile
                lib.call_module("grdfilter", build_arg_string(kwargs, infile=infile))

        # Only load (and return) the grid when it was written to our
        # temporary file; a user-specified outgrid stays on disk and the
        # function returns None.
        return load_dataarray(outgrid) if outgrid == tmpfile.name else None
| 31.810458
| 85
| 0.614547
|
from pygmt.clib import Session
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
from pygmt.io import load_dataarray
@fmt_docstring
@use_alias(
D="distance",
F="filter",
G="outgrid",
I="spacing",
N="nans",
R="region",
T="toggle",
V="verbose",
f="coltypes",
r="registration",
)
@kwargs_to_strings(I="sequence", R="sequence")
def grdfilter(grid, **kwargs):
with GMTTempFile(suffix=".nc") as tmpfile:
with Session() as lib:
file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
with file_context as infile:
if (outgrid := kwargs.get("G")) is None:
kwargs["G"] = outgrid = tmpfile.name
lib.call_module("grdfilter", build_arg_string(kwargs, infile=infile))
return load_dataarray(outgrid) if outgrid == tmpfile.name else None
| true
| true
|
f7042c2ecf2c1579ad078c46d2e6471a39efed06
| 4,251
|
py
|
Python
|
shingetsu/rss.py
|
acemomiage/saku
|
66ab704106d368f7c916f9ba71b28fe9bef62c48
|
[
"BSD-2-Clause"
] | 78
|
2015-01-09T10:49:10.000Z
|
2022-02-16T03:06:28.000Z
|
shingetsu/rss.py
|
acemomiage/saku
|
66ab704106d368f7c916f9ba71b28fe9bef62c48
|
[
"BSD-2-Clause"
] | 5
|
2015-01-11T16:24:33.000Z
|
2019-02-18T15:02:32.000Z
|
shingetsu/rss.py
|
acemomiage/saku
|
66ab704106d368f7c916f9ba71b28fe9bef62c48
|
[
"BSD-2-Clause"
] | 24
|
2015-01-07T08:29:47.000Z
|
2022-03-23T07:22:20.000Z
|
"""Data structure of RSS and useful functions.
"""
#
# Copyright (c) 2005-2020 shinGETsu Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import html
import re
import cgi
from .template import Template
class Item:
    """A single entry (item) of an RSS feed."""

    # Class-level defaults for per-item fields.
    title = ""
    link = ""
    description = ""
    date = 0  # Seconds from 1970-01-01T00:00

    def __init__(self, link="", title="", date=0, creator='', subject=None,
                 description="", content=""):
        """Store the item fields.

        CR/LF characters are stripped from ``title`` and ``description``
        so each stays on a single line; ``subject`` defaults to a fresh
        empty list per instance.
        """
        self.link = link
        self.date = date
        self.creator = creator
        self.subject = subject if subject else []
        self.title = title.replace('\r', '').replace('\n', '')
        self.description = description.replace('\r', '').replace('\n', '')
        self.content = content
class RSS(dict):
    """An RSS feed.

    It is a dictionary mapping each item's URI to its :class:`Item`.
    """

    # Class-level defaults.
    encode = "utf-8"
    lang = "en"
    title = ""
    parent = ""       # Place where is documents or RSS
    link = ""         # URI of main page
    uri = ""          # URI of RSS
    description = ""

    def __init__(self, encode="utf-8", lang="en", title="",
                 parent="", link="", uri="", description="", xsl=""):
        """Set feed metadata, deriving ``link``/``uri`` from ``parent``
        when they are not given explicitly."""
        self.encode = encode
        self.lang = lang
        self.title = title
        self.description = description
        self.parent = parent
        self.xsl = xsl
        # Normalize a non-empty parent so it ends with a slash.
        if parent and not parent.endswith("/"):
            parent = parent + "/"
            self.parent = self.parent + "/"
        self.link = link if link != "" else parent
        self.uri = uri if uri != "" else parent + "rss.xml"

    def append(self, link,
               title="",
               date=0,
               creator='',
               subject=None,
               description="",
               content="",
               abs=False):
        """Add an item; relative links are resolved against ``self.parent``."""
        full_link = link if abs else self.parent + link
        self[full_link] = Item(full_link,
                               title=title,
                               date=date,
                               creator=creator,
                               subject=subject,
                               description=description,
                               content=content)

    def keys(self):
        """Return the item links sorted by date, newest first."""
        return sorted(dict.keys(self),
                      key=lambda link: self[link].date,
                      reverse=True)

    def __iter__(self):
        """Iterate over the links in date order (newest first)."""
        return iter(self.keys())
def make_rss1(rss):
    """Render the given feed object as an RSS 1.0 document."""
    def w3cdate(date):
        # Format seconds-since-epoch as a W3C datetime string (UTC).
        import time
        return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(date))

    context = {
        'rss': rss,
        'w3cdate': w3cdate,
        'escape': html.escape,
        'feed': [rss[uri] for uri in rss],
    }
    return Template().display('rss1', context)
| 30.148936
| 76
| 0.567866
|
import html
import re
import cgi
from .template import Template
class Item:
title = ""
link = ""
description = ""
date = 0
def __init__(self, link="", title="", date=0, creator='', subject=None,
description="", content=""):
del_eos = re.compile(r'[\r\n]*')
self.link = link
self.date = date
self.creator = creator
if subject:
self.subject = subject
else:
self.subject = []
self.title = del_eos.sub('', title)
self.description = del_eos.sub('', description)
self.content = content
class RSS(dict):
encode = "utf-8"
lang = "en"
title = ""
parent = ""
link = ""
uri = ""
description = ""
def __init__(self, encode="utf-8", lang="en", title="",
parent="", link="", uri="", description="", xsl=""):
self.encode = encode
self.lang = lang
self.title = title
self.description = description
self.parent = parent
self.xsl = xsl
if parent and parent[-1] != "/":
parent += "/"
self.parent += "/"
if link != "":
self.link = link
else:
self.link = parent
if uri != "":
self.uri = uri
else:
self.uri = parent + "rss.xml"
def append(self, link,
title = "",
date = 0,
creator = '',
subject = None,
description = "",
content = "",
abs = False):
if not abs:
link = self.parent + link
item = Item(link,
title = title,
date = date,
creator = creator,
subject = subject,
description = description,
content = content)
self[link] = item
def keys(self):
links = list(dict.keys(self))
links.sort(key=lambda x: self[x].date, reverse=True)
return links
def __iter__(self):
return iter(list(self.keys()))
def make_rss1(rss):
def w3cdate(date):
from time import strftime, gmtime
return strftime('%Y-%m-%dT%H:%M:%SZ', gmtime(date))
var = {
'rss': rss,
'feed': [rss[uri] for uri in rss],
'w3cdate': w3cdate,
'escape': html.escape,
}
return Template().display('rss1', var)
| true
| true
|
f7042cc11b1d56e506098d13c8d748a89e62133e
| 10,272
|
py
|
Python
|
GPy/kern/src/static.py
|
RaulAstudillo/bocf
|
cd84eab2d1b4ea5a4bdeeb452df92296afbafb87
|
[
"BSD-3-Clause"
] | 9
|
2019-06-16T01:18:52.000Z
|
2021-11-03T15:43:55.000Z
|
GPy/kern/src/static.py
|
RaulAstudillo/bocf
|
cd84eab2d1b4ea5a4bdeeb452df92296afbafb87
|
[
"BSD-3-Clause"
] | 3
|
2020-09-09T06:12:51.000Z
|
2021-06-01T23:46:18.000Z
|
GPy/kern/src/static.py
|
RaulAstudillo/bocf
|
cd84eab2d1b4ea5a4bdeeb452df92296afbafb87
|
[
"BSD-3-Clause"
] | 5
|
2019-07-07T13:17:44.000Z
|
2020-09-09T06:06:17.000Z
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kern import Kern
import numpy as np
from ...core.parameterization import Param
from paramz.transformations import Logexp
from paramz.caching import Cache_this
class Static(Kern):
    """Base class for kernels whose covariance does not depend on X.

    The only parameter is ``variance``; every gradient with respect to
    the inputs is identically zero.
    """

    def __init__(self, input_dim, variance, active_dims, name):
        super(Static, self).__init__(input_dim, active_dims, name)
        # Variance is kept positive via the Logexp transform.
        self.variance = Param('variance', variance, Logexp())
        self.link_parameters(self.variance)

    def _to_dict(self):
        """Serialize: extend Kern's dict with the variance values."""
        input_dict = super(Static, self)._to_dict()
        input_dict["variance"] = self.variance.values.tolist()
        return input_dict

    def Kdiag(self, X):
        """Diagonal of the covariance: a constant vector filled with variance."""
        ret = np.empty((X.shape[0],), dtype=np.float64)
        ret[:] = self.variance
        return ret

    def gradients_X(self, dL_dK, X, X2=None):
        """Gradient w.r.t. X — zero, since the kernel ignores X."""
        return np.zeros(X.shape)

    def gradients_X_diag(self, dL_dKdiag, X):
        """Diagonal gradient w.r.t. X — zero, since the kernel ignores X."""
        return np.zeros(X.shape)

    def gradients_XX(self, dL_dK, X, X2=None):
        """Second derivatives w.r.t. X — a zero 4-D array."""
        if X2 is None:
            X2 = X
        return np.zeros((X.shape[0], X2.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)

    def gradients_XX_diag(self, dL_dKdiag, X, cov=False):
        """Diagonal second derivatives w.r.t. X — a zero 3-D array."""
        return np.zeros((X.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)

    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Gradient of the psi statistics w.r.t. the inducing inputs Z — zero."""
        return np.zeros(Z.shape)

    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Gradients w.r.t. the variational posterior's mean and variance — both zero."""
        return np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape)

    def psi0(self, Z, variational_posterior):
        """psi0 statistic: the diagonal evaluated at the posterior mean."""
        return self.Kdiag(variational_posterior.mean)

    def psi1(self, Z, variational_posterior):
        """psi1 statistic: the covariance between the posterior mean and Z."""
        return self.K(variational_posterior.mean, Z)

    def psi2(self, Z, variational_posterior):
        """psi2 statistic: sum over data of outer products of psi1 rows."""
        K = self.K(variational_posterior.mean, Z)
        # Equivalent to summing K[:, :, None] * K[:, None, :] over axis 0.
        # NB: more efficient implementations exist on inheriting classes.
        return np.einsum('ij,ik->jk',K,K)

    def input_sensitivity(self, summarize=True):
        """Per-dimension sensitivity; every dimension gets the same variance."""
        if summarize:
            return super(Static, self).input_sensitivity(summarize=summarize)
        else:
            return np.ones(self.input_dim) * self.variance
class White(Static):
    """White-noise (nugget) kernel: k(x, x') = variance if x and x' are the
    same point of the same input set, and 0 otherwise."""
    def __init__(self, input_dim, variance=1., active_dims=None, name='white'):
        super(White, self).__init__(input_dim, variance, active_dims, name)
    def K(self, X, X2=None):
        # Noise only correlates with itself: diagonal in the symmetric case,
        # all zeros when two distinct input sets are compared.
        if X2 is not None:
            return np.zeros((X.shape[0], X2.shape[0]))
        return self.variance * np.eye(X.shape[0])
    def psi2(self, Z, variational_posterior):
        # Noise never correlates with the inducing inputs Z.
        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
    def psi2n(self, Z, variational_posterior):
        return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
    def update_gradients_full(self, dL_dK, X, X2=None):
        # Only the diagonal of K depends on the variance, and only in the
        # symmetric (X2 is None) case.
        self.variance.gradient = np.trace(dL_dK) if X2 is None else 0.
    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = dL_dKdiag.sum()
    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        # Only psi0 depends on the variance (psi1/psi2 are zero).
        self.variance.gradient = dL_dpsi0.sum()
class WhiteHeteroscedastic(Static):
    def __init__(self, input_dim, num_data, variance=1., active_dims=None, name='white_hetero'):
        """
        A heteroscedastic White kernel (nugget/noise).
        It defines one variance (nugget) per input sample.

        Prediction excludes any noise learnt by this Kernel, so be careful using this kernel.

        You can plot the errors learnt by this kernel by something similar as:
        plt.errorbar(m.X, m.Y, yerr=2*np.sqrt(m.kern.white.variance))
        """
        # NOTE: deliberately calls Static's *parent* initializer (skipping
        # Static.__init__, which would create a scalar variance) so that a
        # vector of per-sample variances can be installed instead.
        super(Static, self).__init__(input_dim, active_dims, name)
        self.variance = Param('variance', np.ones(num_data) * variance, Logexp())
        self.link_parameters(self.variance)
    def Kdiag(self, X):
        if X.shape[0] == self.variance.shape[0]:
            # If the input has the same number of samples as
            # the number of variances, we return the variances
            return self.variance
        # Otherwise (e.g. at test inputs) this kernel contributes no noise.
        return 0.
    def K(self, X, X2=None):
        if X2 is None and X.shape[0] == self.variance.shape[0]:
            return np.eye(X.shape[0]) * self.variance
        else:
            # Scalar 0. relies on numpy broadcasting at the call sites.
            return 0.
    def psi2(self, Z, variational_posterior):
        # Noise does not correlate with the inducing inputs Z.
        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
    def psi2n(self, Z, variational_posterior):
        return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
    def update_gradients_full(self, dL_dK, X, X2=None):
        if X2 is None:
            # One gradient entry per sample: the diagonal of dL/dK.
            self.variance.gradient = np.diagonal(dL_dK)
        else:
            self.variance.gradient = 0.
    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = dL_dKdiag
    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        # Per-sample gradient: only psi0 depends on the variances.
        self.variance.gradient = dL_dpsi0
class Bias(Static):
    """Constant (bias) kernel: k(x, x') = variance for every pair of inputs."""
    def __init__(self, input_dim, variance=1., active_dims=None, name='bias'):
        super(Bias, self).__init__(input_dim, variance, active_dims, name)
    def to_dict(self):
        """Serialize this kernel, tagging it with its fully-qualified class name."""
        input_dict = super(Bias, self)._to_dict()
        input_dict["class"] = "GPy.kern.Bias"
        return input_dict
    @staticmethod
    def _from_dict(kernel_class, input_dict):
        # 'useGPU' is accepted for interface compatibility but ignored here.
        useGPU = input_dict.pop('useGPU', None)
        return Bias(**input_dict)
    def K(self, X, X2=None):
        """Return a constant matrix of 'variance', shaped (n1, n2)."""
        shape = (X.shape[0], X.shape[0] if X2 is None else X2.shape[0])
        return np.full(shape, self.variance, dtype=np.float64)
    def update_gradients_full(self, dL_dK, X, X2=None):
        # dK/dvariance is a matrix of ones, so the chain rule reduces to a sum.
        self.variance.gradient = dL_dK.sum()
    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = dL_dKdiag.sum()
    def psi2(self, Z, variational_posterior):
        # Summed psi2: variance^2 times the number of data points N.
        return np.full((Z.shape[0], Z.shape[0]), self.variance*self.variance*variational_posterior.shape[0], dtype=np.float64)
    def psi2n(self, Z, variational_posterior):
        # Per-data-point psi2: a constant variance^2 slab for each point.
        ret = np.empty((variational_posterior.mean.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
        ret[:] = self.variance*self.variance
        return ret
    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        if dL_dpsi2.ndim == 2:
            # Summed psi2 form: the factor of N must be applied explicitly.
            self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()
                                      + 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0])
        else:
            # Per-point psi2 form: the sum over points already includes N.
            self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()
                                      + 2.*self.variance*dL_dpsi2.sum())
class Fixed(Static):
    def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):
        """
        A kernel whose symmetric covariance is a fixed, precomputed matrix
        scaled by a learnable variance.

        :param input_dim: the number of input dimensions
        :type input_dim: int
        :param covariance_matrix: the fixed covariance used when X2 is None
        :param variance: the variance of the kernel
        :type variance: float
        """
        super(Fixed, self).__init__(input_dim, variance, active_dims, name)
        self.fixed_K = covariance_matrix
    def K(self, X, X2=None):
        # Fix: X2 now defaults to None, matching the signature of every other
        # kernel in this module (White, Bias, ...), so calls like K(X) work.
        if X2 is None:
            return self.variance * self.fixed_K
        else:
            # No cross-covariance is defined for a fixed matrix.
            return np.zeros((X.shape[0], X2.shape[0]))
    def Kdiag(self, X):
        return self.variance * self.fixed_K.diagonal()
    def update_gradients_full(self, dL_dK, X, X2=None):
        if X2 is None:
            # dK/dvariance == fixed_K, so the gradient is the elementwise sum.
            self.variance.gradient = np.einsum('ij,ij', dL_dK, self.fixed_K)
        else:
            # K is identically zero in the cross case; no variance gradient.
            self.variance.gradient = 0
    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = np.einsum('i,i', dL_dKdiag, np.diagonal(self.fixed_K))
    def psi2(self, Z, variational_posterior):
        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
    def psi2n(self, Z, variational_posterior):
        return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        self.variance.gradient = dL_dpsi0.sum()
class Precomputed(Fixed):
    def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='precomputed'):
        """
        Class for precomputed kernels, indexed by columns in X.

        Usage example::

            import numpy as np
            from GPy.models import GPClassification
            from GPy.kern import Precomputed
            from sklearn.cross_validation import LeaveOneOut

            n = 10
            d = 100
            X = np.arange(n).reshape((n,1))  # column vector of indices
            y = 2*np.random.binomial(1,0.5,(n,1))-1
            X0 = np.random.randn(n,d)
            k = np.dot(X0,X0.T)
            kern = Precomputed(1,k)          # k is a n x n covariance matrix

            cv = LeaveOneOut(n)
            ypred = y.copy()
            for train, test in cv:
                m = GPClassification(X[train], y[train], kernel=kern)
                m.optimize()
                ypred[test] = 2*(m.predict(X[test])[0]>0.5)-1

        :param input_dim: the number of input dimensions
        :type input_dim: int
        :param variance: the variance of the kernel
        :type variance: float
        """
        assert input_dim==1, "Precomputed only implemented in one dimension. Use multiple Precomputed kernels to have more dimensions by making use of active_dims"
        super(Precomputed, self).__init__(input_dim, covariance_matrix, variance, active_dims, name)
    @Cache_this(limit=2)
    def _index(self, X, X2):
        """Slice the precomputed matrix by the integer indices held in X (and X2)."""
        # X is expected to hold integer row indices into fixed_K; results are
        # memoized by Cache_this to avoid repeated fancy indexing.
        if X2 is None:
            # NOTE(review): the same flatiter is reused for both axes here —
            # presumably numpy materializes it on each indexing; confirm.
            i1 = i2 = X.astype('int').flat
        else:
            i1, i2 = X.astype('int').flat, X2.astype('int').flat
        return self.fixed_K[i1,:][:,i2]
    def K(self, X, X2=None):
        return self.variance * self._index(X, X2)
    def Kdiag(self, X):
        return self.variance * self._index(X,None).diagonal()
    def update_gradients_full(self, dL_dK, X, X2=None):
        # dK/dvariance is the indexed submatrix itself.
        self.variance.gradient = np.einsum('ij,ij', dL_dK, self._index(X, X2))
    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = np.einsum('i,ii', dL_dKdiag, self._index(X, None))
| 38.328358
| 163
| 0.639408
|
from .kern import Kern
import numpy as np
from ...core.parameterization import Param
from paramz.transformations import Logexp
from paramz.caching import Cache_this
class Static(Kern):
def __init__(self, input_dim, variance, active_dims, name):
super(Static, self).__init__(input_dim, active_dims, name)
self.variance = Param('variance', variance, Logexp())
self.link_parameters(self.variance)
def _to_dict(self):
input_dict = super(Static, self)._to_dict()
input_dict["variance"] = self.variance.values.tolist()
return input_dict
def Kdiag(self, X):
ret = np.empty((X.shape[0],), dtype=np.float64)
ret[:] = self.variance
return ret
def gradients_X(self, dL_dK, X, X2=None):
return np.zeros(X.shape)
def gradients_X_diag(self, dL_dKdiag, X):
return np.zeros(X.shape)
def gradients_XX(self, dL_dK, X, X2=None):
if X2 is None:
X2 = X
return np.zeros((X.shape[0], X2.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)
def gradients_XX_diag(self, dL_dKdiag, X, cov=False):
return np.zeros((X.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return np.zeros(Z.shape)
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape)
def psi0(self, Z, variational_posterior):
return self.Kdiag(variational_posterior.mean)
def psi1(self, Z, variational_posterior):
return self.K(variational_posterior.mean, Z)
def psi2(self, Z, variational_posterior):
K = self.K(variational_posterior.mean, Z)
return np.einsum('ij,ik->jk',K,K)
def input_sensitivity(self, summarize=True):
if summarize:
return super(Static, self).input_sensitivity(summarize=summarize)
else:
return np.ones(self.input_dim) * self.variance
class White(Static):
def __init__(self, input_dim, variance=1., active_dims=None, name='white'):
super(White, self).__init__(input_dim, variance, active_dims, name)
def K(self, X, X2=None):
if X2 is None:
return np.eye(X.shape[0])*self.variance
else:
return np.zeros((X.shape[0], X2.shape[0]))
def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None:
self.variance.gradient = np.trace(dL_dK)
else:
self.variance.gradient = 0.
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = dL_dKdiag.sum()
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0.sum()
class WhiteHeteroscedastic(Static):
def __init__(self, input_dim, num_data, variance=1., active_dims=None, name='white_hetero'):
super(Static, self).__init__(input_dim, active_dims, name)
self.variance = Param('variance', np.ones(num_data) * variance, Logexp())
self.link_parameters(self.variance)
def Kdiag(self, X):
if X.shape[0] == self.variance.shape[0]:
return self.variance
return 0.
def K(self, X, X2=None):
if X2 is None and X.shape[0] == self.variance.shape[0]:
return np.eye(X.shape[0]) * self.variance
else:
return 0.
def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None:
self.variance.gradient = np.diagonal(dL_dK)
else:
self.variance.gradient = 0.
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = dL_dKdiag
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0
class Bias(Static):
def __init__(self, input_dim, variance=1., active_dims=None, name='bias'):
super(Bias, self).__init__(input_dim, variance, active_dims, name)
def to_dict(self):
input_dict = super(Bias, self)._to_dict()
input_dict["class"] = "GPy.kern.Bias"
return input_dict
@staticmethod
def _from_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return Bias(**input_dict)
def K(self, X, X2=None):
shape = (X.shape[0], X.shape[0] if X2 is None else X2.shape[0])
return np.full(shape, self.variance, dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None):
self.variance.gradient = dL_dK.sum()
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = dL_dKdiag.sum()
def psi2(self, Z, variational_posterior):
return np.full((Z.shape[0], Z.shape[0]), self.variance*self.variance*variational_posterior.shape[0], dtype=np.float64)
def psi2n(self, Z, variational_posterior):
ret = np.empty((variational_posterior.mean.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
ret[:] = self.variance*self.variance
return ret
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
if dL_dpsi2.ndim == 2:
self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()
+ 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0])
else:
self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()
+ 2.*self.variance*dL_dpsi2.sum())
class Fixed(Static):
def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):
super(Fixed, self).__init__(input_dim, variance, active_dims, name)
self.fixed_K = covariance_matrix
def K(self, X, X2):
if X2 is None:
return self.variance * self.fixed_K
else:
return np.zeros((X.shape[0], X2.shape[0]))
def Kdiag(self, X):
return self.variance * self.fixed_K.diagonal()
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None:
self.variance.gradient = np.einsum('ij,ij', dL_dK, self.fixed_K)
else:
self.variance.gradient = 0
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = np.einsum('i,i', dL_dKdiag, np.diagonal(self.fixed_K))
def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0.sum()
class Precomputed(Fixed):
def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='precomputed'):
assert input_dim==1, "Precomputed only implemented in one dimension. Use multiple Precomputed kernels to have more dimensions by making use of active_dims"
super(Precomputed, self).__init__(input_dim, covariance_matrix, variance, active_dims, name)
@Cache_this(limit=2)
def _index(self, X, X2):
if X2 is None:
i1 = i2 = X.astype('int').flat
else:
i1, i2 = X.astype('int').flat, X2.astype('int').flat
return self.fixed_K[i1,:][:,i2]
def K(self, X, X2=None):
return self.variance * self._index(X, X2)
def Kdiag(self, X):
return self.variance * self._index(X,None).diagonal()
def update_gradients_full(self, dL_dK, X, X2=None):
self.variance.gradient = np.einsum('ij,ij', dL_dK, self._index(X, X2))
def update_gradients_diag(self, dL_dKdiag, X):
self.variance.gradient = np.einsum('i,ii', dL_dKdiag, self._index(X, None))
| true
| true
|
f7042de1f100aeb375f867dcd8fb140922a67444
| 640
|
py
|
Python
|
setup.py
|
thanakritju/python-slack-events-sdk
|
67bdb55e07fd5c76845bad37ea88e506d42f1b2c
|
[
"MIT"
] | null | null | null |
setup.py
|
thanakritju/python-slack-events-sdk
|
67bdb55e07fd5c76845bad37ea88e506d42f1b2c
|
[
"MIT"
] | null | null | null |
setup.py
|
thanakritju/python-slack-events-sdk
|
67bdb55e07fd5c76845bad37ea88e506d42f1b2c
|
[
"MIT"
] | null | null | null |
import setuptools

# Read the README for the long description shown on PyPI. The encoding is
# pinned to UTF-8 so the build does not depend on the platform's locale
# (the original relied on the locale default, which can fail on Windows).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="slacksdk",
    version="0.0.1a",
    author="Thanakrit Juthamongkhon",
    author_email="thanakrit.ju.work@gmail.com",
    description="A minimal slack sdk",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/thanakritju/python-slack-events-sdk",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 30.47619
| 65
| 0.675
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="slacksdk",
version="0.0.1a",
author="Thanakrit Juthamongkhon",
author_email="thanakrit.ju.work@gmail.com",
description="A minimal slack sdk",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/thanakritju/python-slack-events-sdk",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| true
| true
|
f7042e073beb294e8fce962829b920896e385e2e
| 5,559
|
py
|
Python
|
inside/pipelines/clevr.py
|
jacenkow/inside
|
8b277e2744233a23eb8f55a29417135729fc531d
|
[
"Apache-2.0"
] | 6
|
2020-08-26T13:15:15.000Z
|
2021-08-02T22:07:49.000Z
|
inside/pipelines/clevr.py
|
SLEEP-CO/inside
|
6f860420644b50b78981158a59ceed8cdbd209bf
|
[
"Apache-2.0"
] | 13
|
2020-09-25T22:26:45.000Z
|
2022-03-12T00:47:04.000Z
|
inside/pipelines/clevr.py
|
SLEEP-CO/inside
|
6f860420644b50b78981158a59ceed8cdbd209bf
|
[
"Apache-2.0"
] | 2
|
2020-10-07T17:11:57.000Z
|
2021-05-22T13:20:14.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Grzegorz Jacenków.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Training and evaluation pipeline for the networks."""
import csv
import os
import tensorflow as tf
from tensorflow.keras.metrics import Mean
from inside import config
from inside.callbacks import setup_callbacks
from inside.constructor import setup_comet_ml, setup_model
from inside.loaders import CLEVR
from inside.metrics import DiceScore
def _write_results(logs):
    """Write final logs to a CSV file in the experiment folder.

    :param logs: mapping of metric name -> value; one CSV row per entry.
    """
    # Fix: the original passed an open() handle straight into csv.writer and
    # never closed it; use a context manager so the file is flushed/closed.
    # newline="" is the documented way to open files for the csv module.
    path = os.path.join(config.EXPERIMENT_FOLDER, "results.csv")
    with open(path, "w", newline="") as handle:
        writer = csv.writer(handle)
        for key, val in logs.items():
            writer.writerow([key, val])
class Pipeline:
    """Training/evaluation pipeline: custom TF training loop with Comet.ml
    logging, Dice/loss metrics, and callback-driven early stopping and
    checkpointing."""
    def __init__(self):
        # Model.
        self.model = setup_model()

        # Comet.ml experiment.
        self.comet_ml = setup_comet_ml()

        # Testing metrics (also reused for validation during training).
        self.test_dice = DiceScore(name="testing_dice")
        self.test_loss = Mean(name="testing_loss")

        # Training metrics.
        self.training_dice = DiceScore(name="training_dice")
        self.training_loss = Mean(name="training_loss")

        # Callbacks: CSV logger, early stopping, model checkpoint, plotter.
        self.cl, self.es, self.mc, self.pp = setup_callbacks()
        self.cl.model, self.es.model, self.mc.model = \
            self.model, self.model, self.model
        self.pp.model = self.model
        self.pp.comet_ml = self.comet_ml

    def fit(self):
        """Train the model, validating each epoch, then evaluate on the test set."""
        # Toy dataset.
        loader = CLEVR()
        train_ds, valid_ds, test_ds = loader.load()

        with self.comet_ml.train():
            self.cl.on_train_begin()
            self.es.on_train_begin()
            self.mc.on_train_begin()
            self.pp.on_train_begin()

            for epoch in range(config.EXPERIMENT_EPOCHS):
                self.comet_ml.set_epoch(epoch)

                for images, labels in train_ds:
                    self.train_step(images, labels)

                for batch, (images, labels) in enumerate(valid_ds):
                    self.test_step(images, labels)

                    if not batch:  # Log only first mini-batch from an epoch.
                        self.pp.on_epoch_end(epoch, images, labels)

                # Get results.
                logs = {
                    "dice": self.training_dice.result().numpy(),
                    "loss": self.training_loss.result().numpy(),
                    "validation_dice": self.test_dice.result().numpy(),
                    "validation_loss": self.test_loss.result().numpy(),
                }

                template = ("Epoch {}. Training Loss: {}. Training Dice: {}. "
                            "Validation Loss: {}. Validation Dice: {}.")

                print(template.format(epoch + 1,
                                      logs['loss'],
                                      logs['dice'],
                                      logs['validation_loss'],
                                      logs['validation_dice']))

                # Log metrics.
                self.comet_ml.log_metrics(logs, epoch=epoch)
                self.cl.on_epoch_end(epoch, logs)
                self.es.on_epoch_end(epoch, logs)
                self.mc.on_epoch_end(epoch, logs)

                # Reset the metrics for the next epoch.
                self.training_dice.reset_states()
                self.training_loss.reset_states()
                self.test_dice.reset_states()
                self.test_loss.reset_states()

                # Early stopping criterion (flag is set by the ES callback).
                if self.es.model.stop_training:
                    self.cl.on_train_end()
                    self.es.on_train_end()
                    self.mc.on_train_end()
                    break

        with self.comet_ml.test():
            for batch, (images, labels) in enumerate(test_ds):
                self.test_step(images, labels)

                if not batch:
                    self.pp.on_test_end(images, labels)

            # Get results.
            logs = {
                "dice": self.test_dice.result().numpy(),
                "loss": self.test_loss.result().numpy(),
            }

            print("Test Loss: {}. Test Dice: {}.".format(
                logs['loss'], logs['dice']))

            # Log metrics.
            self.comet_ml.log_metrics(logs)
            _write_results(logs)

    @tf.function
    def train_step(self, images, labels):
        """One optimization step: forward pass, loss, backprop, metric update."""
        with tf.GradientTape() as tape:
            predictions = self.model.inference(images)
            loss = self.model.loss(labels, predictions)
        gradients = tape.gradient(loss, self.model.trainable_variables)
        self.model.optimiser.apply_gradients(
            zip(gradients, self.model.trainable_variables))

        self.training_loss(loss)
        self.training_dice(labels, predictions)

    @tf.function
    def test_step(self, images, labels):
        """One evaluation step: forward pass and test-metric update (no backprop)."""
        predictions = self.model.inference(images)
        t_loss = self.model.loss(labels, predictions)

        self.test_loss(t_loss)
        self.test_dice(labels, predictions)
| 34.314815
| 79
| 0.572585
|
import csv
import os
import tensorflow as tf
from tensorflow.keras.metrics import Mean
from inside import config
from inside.callbacks import setup_callbacks
from inside.constructor import setup_comet_ml, setup_model
from inside.loaders import CLEVR
from inside.metrics import DiceScore
def _write_results(logs):
w = csv.writer(open(os.path.join(
config.EXPERIMENT_FOLDER, "results.csv"), "w"))
for key, val in logs.items():
w.writerow([key, val])
class Pipeline:
def __init__(self):
self.model = setup_model()
self.comet_ml = setup_comet_ml()
self.test_dice = DiceScore(name="testing_dice")
self.test_loss = Mean(name="testing_loss")
self.training_dice = DiceScore(name="training_dice")
self.training_loss = Mean(name="training_loss")
self.cl, self.es, self.mc, self.pp = setup_callbacks()
self.cl.model, self.es.model, self.mc.model = \
self.model, self.model, self.model
self.pp.model = self.model
self.pp.comet_ml = self.comet_ml
def fit(self):
loader = CLEVR()
train_ds, valid_ds, test_ds = loader.load()
with self.comet_ml.train():
self.cl.on_train_begin()
self.es.on_train_begin()
self.mc.on_train_begin()
self.pp.on_train_begin()
for epoch in range(config.EXPERIMENT_EPOCHS):
self.comet_ml.set_epoch(epoch)
for images, labels in train_ds:
self.train_step(images, labels)
for batch, (images, labels) in enumerate(valid_ds):
self.test_step(images, labels)
if not batch: self.pp.on_epoch_end(epoch, images, labels)
logs = {
"dice": self.training_dice.result().numpy(),
"loss": self.training_loss.result().numpy(),
"validation_dice": self.test_dice.result().numpy(),
"validation_loss": self.test_loss.result().numpy(),
}
template = ("Epoch {}. Training Loss: {}. Training Dice: {}. "
"Validation Loss: {}. Validation Dice: {}.")
print(template.format(epoch + 1,
logs['loss'],
logs['dice'],
logs['validation_loss'],
logs['validation_dice']))
self.comet_ml.log_metrics(logs, epoch=epoch)
self.cl.on_epoch_end(epoch, logs)
self.es.on_epoch_end(epoch, logs)
self.mc.on_epoch_end(epoch, logs)
self.training_dice.reset_states()
self.training_loss.reset_states()
self.test_dice.reset_states()
self.test_loss.reset_states()
if self.es.model.stop_training:
self.cl.on_train_end()
self.es.on_train_end()
self.mc.on_train_end()
break
with self.comet_ml.test():
for batch, (images, labels) in enumerate(test_ds):
self.test_step(images, labels)
if not batch:
self.pp.on_test_end(images, labels)
logs = {
"dice": self.test_dice.result().numpy(),
"loss": self.test_loss.result().numpy(),
}
print("Test Loss: {}. Test Dice: {}.".format(
logs['loss'], logs['dice']))
self.comet_ml.log_metrics(logs)
_write_results(logs)
@tf.function
def train_step(self, images, labels):
with tf.GradientTape() as tape:
predictions = self.model.inference(images)
loss = self.model.loss(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.model.optimiser.apply_gradients(
zip(gradients, self.model.trainable_variables))
self.training_loss(loss)
self.training_dice(labels, predictions)
@tf.function
def test_step(self, images, labels):
predictions = self.model.inference(images)
t_loss = self.model.loss(labels, predictions)
self.test_loss(t_loss)
self.test_dice(labels, predictions)
| true
| true
|
f7042e33a6e4a8e11c00eba052a8af8e91c9a9a7
| 4,115
|
py
|
Python
|
dataset/generate_tip4p_data.py
|
BaratiLab/GAMD
|
7de91526f1c8c06ea005920e6a55c3cf031c26b2
|
[
"MIT"
] | null | null | null |
dataset/generate_tip4p_data.py
|
BaratiLab/GAMD
|
7de91526f1c8c06ea005920e6a55c3cf031c26b2
|
[
"MIT"
] | null | null | null |
dataset/generate_tip4p_data.py
|
BaratiLab/GAMD
|
7de91526f1c8c06ea005920e6a55c3cf031c26b2
|
[
"MIT"
] | 1
|
2022-03-17T19:39:18.000Z
|
2022-03-17T19:39:18.000Z
|
from openmmtools import testsystems
from simtk.openmm.app import *
import simtk.unit as unit
import logging
import numpy as np
from openmmtools.constants import kB
from openmmtools import respa, utils
logger = logging.getLogger(__name__)
# Energy unit used by OpenMM unit system
from openmmtools import states, integrators
import time
import numpy as np
import sys
import os
def get_rotation_matrix():
    """Draw a random 3-D rotation matrix for augmenting point clouds.

    Three Euler angles are sampled uniformly from [-pi, pi) and the
    rotation is composed as Rz @ Ry @ Rx.

    Return:
        (3, 3) float32 rotation matrix
    """
    angles = np.pi * np.random.uniform(-1.0, 1.0, size=(3,))
    print(f'Using angle: {angles}')
    cos_x, cos_y, cos_z = np.cos(angles)
    sin_x, sin_y, sin_z = np.sin(angles)
    rot_x = np.array([[1., 0, 0],
                      [0, cos_x, -sin_x],
                      [0, sin_x, cos_x]], dtype=np.float32)
    rot_y = np.array([[cos_y, 0, sin_y],
                      [0, 1, 0],
                      [-sin_y, 0, cos_y]], dtype=np.float32)
    rot_z = np.array([[cos_z, -sin_z, 0],
                      [sin_z, cos_z, 0],
                      [0, 0, 1]], dtype=np.float32)
    return rot_z @ rot_y @ rot_x
def center_positions(pos):
    """Shift positions so their centroid sits at the origin.

    Returns the centred array and the centroid that was subtracted,
    so the caller can undo the shift later.
    """
    centroid = np.mean(pos, axis=0)
    return pos - centroid, centroid
# Box-size multiplier (currently unused below; the edge is hard-coded to 2 nm).
BOX_SCALE = 2
# Integration timestep in femtoseconds.
DT = 2

# Generate 10 independent NVT trajectories, one per random seed.
for seed in range(10):
    print(f'Running seed: {seed}')

    # TIP4P-Ew water box with a 2 nm edge.
    waterbox = testsystems.WaterBox(
        box_edge=2 * unit.nanometers,
        model='tip4pew')
    [topology, system, positions] = [waterbox.topology, waterbox.system, waterbox.positions]

    # Randomly rotate the initial configuration about its centroid and add
    # small Gaussian jitter so each run starts from a distinct state.
    R = get_rotation_matrix()
    positions = positions.value_in_unit(unit.angstrom)
    positions, off = center_positions(positions)
    positions = np.matmul(positions, R)
    positions += off
    positions += np.random.randn(positions.shape[0], positions.shape[1]) * 0.005
    positions *= unit.angstrom
    # NOTE(review): assumes 3 recorded sites per molecule; TIP4P-Ew models
    # have 4 sites — confirm this count is intentional (p_num is unused below).
    p_num = positions.shape[0] // 3

    # Nose-Hoover chain thermostat at 300 K, velocity-Verlet integration.
    timestep = DT * unit.femtoseconds
    temperature = 300 * unit.kelvin
    chain_length = 10
    friction = 1. / unit.picosecond
    num_mts = 5
    num_yoshidasuzuki = 5

    integrator = integrators.NoseHooverChainVelocityVerletIntegrator(system,
                                                                     temperature,
                                                                     friction,
                                                                     timestep, chain_length, num_mts, num_yoshidasuzuki)
    simulation = Simulation(topology, system, integrator)
    simulation.context.setPositions(positions)
    simulation.context.setVelocitiesToTemperature(temperature)
    # Relax the jittered configuration before production.
    simulation.minimizeEnergy(tolerance=1*unit.kilojoule/unit.mole)
    simulation.step(1)

    os.makedirs(f'./water_data_tip4p/', exist_ok=True)
    # Thermodynamic log every 50 steps.
    dataReporter_gt = StateDataReporter(f'./log_nvt_tip4p_{seed}.txt', 50, totalSteps=50000,
        step=True, time=True, speed=True, progress=True, elapsedTime=True, remainingTime=True,
        potentialEnergy=True, kineticEnergy=True, totalEnergy=True, temperature=True,
        separator='\t')
    simulation.reporters.append(dataReporter_gt)

    # Save positions, velocities and forces every 50 steps (1000 snapshots).
    for t in range(1000):
        if (t+1)%100 == 0:
            print(f'Finished {(t+1)*50} steps')
        state = simulation.context.getState(getPositions=True,
                                            getVelocities=True,
                                            getForces=True,
                                            enforcePeriodicBox=True)
        pos = state.getPositions(asNumpy=True).value_in_unit(unit.angstrom)
        vel = state.getVelocities(asNumpy=True).value_in_unit(unit.meter / unit.second)
        force = state.getForces(asNumpy=True).value_in_unit(unit.kilojoules_per_mole/unit.nanometer)
        np.savez(f'./water_data_tip4p/data_{seed}_{t}.npz',
                 pos=pos,
                 vel=vel,
                 forces=force)
        simulation.step(50)
| 36.741071
| 121
| 0.600243
|
from openmmtools import testsystems
from simtk.openmm.app import *
import simtk.unit as unit
import logging
import numpy as np
from openmmtools.constants import kB
from openmmtools import respa, utils
logger = logging.getLogger(__name__)
from openmmtools import states, integrators
import time
import numpy as np
import sys
import os
def get_rotation_matrix():
angles = np.random.uniform(-1.0, 1.0, size=(3,)) * np.pi
print(f'Using angle: {angles}')
Rx = np.array([[1., 0, 0],
[0, np.cos(angles[0]), -np.sin(angles[0])],
[0, np.sin(angles[0]), np.cos(angles[0])]], dtype=np.float32)
Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])],
[0, 1, 0],
[-np.sin(angles[1]), 0, np.cos(angles[1])]], dtype=np.float32)
Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0],
[np.sin(angles[2]), np.cos(angles[2]), 0],
[0, 0, 1]], dtype=np.float32)
rotation_matrix = np.matmul(Rz, np.matmul(Ry, Rx))
return rotation_matrix
def center_positions(pos):
offset = np.mean(pos, axis=0)
return pos - offset, offset
BOX_SCALE = 2
DT = 2
for seed in range(10):
print(f'Running seed: {seed}')
waterbox = testsystems.WaterBox(
box_edge=2 * unit.nanometers,
model='tip4pew')
[topology, system, positions] = [waterbox.topology, waterbox.system, waterbox.positions]
R = get_rotation_matrix()
positions = positions.value_in_unit(unit.angstrom)
positions, off = center_positions(positions)
positions = np.matmul(positions, R)
positions += off
positions += np.random.randn(positions.shape[0], positions.shape[1]) * 0.005
positions *= unit.angstrom
p_num = positions.shape[0] // 3
timestep = DT * unit.femtoseconds
temperature = 300 * unit.kelvin
chain_length = 10
friction = 1. / unit.picosecond
num_mts = 5
num_yoshidasuzuki = 5
integrator = integrators.NoseHooverChainVelocityVerletIntegrator(system,
temperature,
friction,
timestep, chain_length, num_mts, num_yoshidasuzuki)
simulation = Simulation(topology, system, integrator)
simulation.context.setPositions(positions)
simulation.context.setVelocitiesToTemperature(temperature)
simulation.minimizeEnergy(tolerance=1*unit.kilojoule/unit.mole)
simulation.step(1)
os.makedirs(f'./water_data_tip4p/', exist_ok=True)
dataReporter_gt = StateDataReporter(f'./log_nvt_tip4p_{seed}.txt', 50, totalSteps=50000,
step=True, time=True, speed=True, progress=True, elapsedTime=True, remainingTime=True,
potentialEnergy=True, kineticEnergy=True, totalEnergy=True, temperature=True,
separator='\t')
simulation.reporters.append(dataReporter_gt)
for t in range(1000):
if (t+1)%100 == 0:
print(f'Finished {(t+1)*50} steps')
state = simulation.context.getState(getPositions=True,
getVelocities=True,
getForces=True,
enforcePeriodicBox=True)
pos = state.getPositions(asNumpy=True).value_in_unit(unit.angstrom)
vel = state.getVelocities(asNumpy=True).value_in_unit(unit.meter / unit.second)
force = state.getForces(asNumpy=True).value_in_unit(unit.kilojoules_per_mole/unit.nanometer)
np.savez(f'./water_data_tip4p/data_{seed}_{t}.npz',
pos=pos,
vel=vel,
forces=force)
simulation.step(50)
| true
| true
|
f7042e390a07e1c0d3c7ad4c593ca6540931ac90
| 966
|
py
|
Python
|
hashing.py
|
bernardosulzbach/scripts
|
9c91d9688873d5a41fdc4ff54688f5b042866867
|
[
"BSD-2-Clause"
] | null | null | null |
hashing.py
|
bernardosulzbach/scripts
|
9c91d9688873d5a41fdc4ff54688f5b042866867
|
[
"BSD-2-Clause"
] | 5
|
2015-12-29T14:35:42.000Z
|
2016-02-06T04:55:48.000Z
|
hashing.py
|
mafagafogigante/scripts
|
9c91d9688873d5a41fdc4ff54688f5b042866867
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import hashlib
def _update_sha256(filename, sha256):
"""
Updates a SHA-256 algorithm with the filename and the contents of a file.
"""
block_size = 64 * 1024 # 64 KB
with open(filename, 'rb') as input_file:
while True:
data = input_file.read(block_size)
if not data:
break
sha256.update(data)
sha256.update(filename.encode("utf-8"))
return sha256
def hash_tree(root):
    """
    Returns a cryptographically secure hash for a whole directory tree taking into account the names and the content of
    the files.

    Files are visited in sorted path order so the digest is deterministic
    regardless of os.walk ordering.
    """
    paths = []
    for current_dir, _subdirs, filenames in os.walk(root):
        paths.extend(os.path.join(current_dir, name) for name in filenames)
    digest = hashlib.sha256()
    for path in sorted(paths):
        _update_sha256(path, digest)
    return digest.hexdigest()
| 28.411765
| 119
| 0.643892
|
import os
import hashlib
def _update_sha256(filename, sha256):
block_size = 64 * 1024 with open(filename, 'rb') as input_file:
while True:
data = input_file.read(block_size)
if not data:
break
sha256.update(data)
sha256.update(filename.encode("utf-8"))
return sha256
def hash_tree(root):
file_list = []
for root_directory, directories, files in os.walk(root):
for file in files:
file_list.append(os.path.join(root_directory, file))
sorted_file_list = sorted(file_list)
sha256 = hashlib.sha256()
for file in sorted_file_list:
_update_sha256(file, sha256)
return sha256.hexdigest()
| true
| true
|
f7042e6c3b8b3bbe394ec0ff65053648fc05d117
| 1,139
|
py
|
Python
|
core/functions/__init__.py
|
annapoulakos/advent-of-code
|
95bf7eb282045194af46f482c3ab847c91f62c44
|
[
"MIT"
] | 3
|
2020-12-03T19:56:50.000Z
|
2021-11-19T00:20:04.000Z
|
core/functions/__init__.py
|
annapoulakos/advent-of-code
|
95bf7eb282045194af46f482c3ab847c91f62c44
|
[
"MIT"
] | null | null | null |
core/functions/__init__.py
|
annapoulakos/advent-of-code
|
95bf7eb282045194af46f482c3ab847c91f62c44
|
[
"MIT"
] | null | null | null |
def destructure(obj, *params):
    """
    Pull the values for *params* out of *obj*, mirroring JS-style
    destructuring: one param yields the bare value, several yield a tuple.
    """
    from operator import itemgetter
    return itemgetter(*params)(obj)
def greet(**kwargs):
    """Print the Advent of Code banner for the puzzle identified in kwargs."""
    year, day, puzzle = destructure(kwargs, 'year', 'day', 'puzzle')
    banner = f'-> {year}-{day}-{puzzle}'
    print('Advent of Code')
    print(banner)
    print('--------------')
def load_data(filename):
    """Read and return the entire text content of *filename* (a pathlib.Path)."""
    with filename.open('r') as stream:
        return stream.read()
def start(fn):
    """
    Decorator for Advent of Code solvers: prints the banner, loads the puzzle
    input text for ``kwargs['year']``/``kwargs['day']`` from the package's
    ``data`` directory, and passes it to *fn* as the first positional argument.

    Fix: the wrapper now carries *fn*'s name/docstring via ``functools.wraps``
    (previously every decorated solver reported as ``wrapped``).
    """
    import pathlib
    import functools
    base_path = pathlib.Path(__file__).parent.parent / 'data'
    @functools.wraps(fn)  # preserve the solver's metadata on the wrapper
    def wrapped(*args, **kwargs):
        greet(**kwargs)
        data = load_data(base_path / f'{kwargs["year"]}.{kwargs["day"]}.txt')
        return fn(data, *args, **kwargs)
    return wrapped
def flatten_json(nested_json):
    """
    Collapse nested dicts and lists into one flat dict keyed by the
    underscore-joined path to each leaf value (list positions become indices).
    """
    flat = {}
    def _walk(node, path=''):
        # Exact-type checks (not isinstance) deliberately match only plain
        # dict/list, treating subclasses as leaf values.
        if type(node) is dict:
            for key in node:
                _walk(node[key], path + key + '_')
        elif type(node) is list:
            for index, element in enumerate(node):
                _walk(element, path + str(index) + '_')
        else:
            flat[path[:-1]] = node
    _walk(nested_json)
    return flat
def sparse_matrix():
    """Create a sparse matrix: a mapping where absent entries read as 0."""
    import collections
    def _zero():
        return 0
    return collections.defaultdict(_zero)
| 27.119048
| 77
| 0.568042
|
def destructure(obj, *params):
import operator
return operator.itemgetter(*params)(obj)
def greet(**kwargs):
year, day, puzzle = destructure(kwargs, 'year', 'day', 'puzzle')
print('Advent of Code')
print(f'-> {year}-{day}-{puzzle}')
print('--------------')
def load_data(filename):
with filename.open('r') as handle:
return handle.read()
def start(fn):
import pathlib
base_path = pathlib.Path(__file__).parent.parent / 'data'
def wrapped(*args, **kwargs):
greet(**kwargs)
data = load_data(base_path / f'{kwargs["year"]}.{kwargs["day"]}.txt')
return fn(data, *args, **kwargs)
return wrapped
def flatten_json(nested_json):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
for i, a in enumerate(x):
flatten(a, name + str(i) + '_')
else:
out[name[:-1]] = x
flatten(nested_json)
return out
def sparse_matrix():
from collections import defaultdict
return defaultdict(lambda: 0)
| true
| true
|
f7042f27a52d9b6035ccb6bdd9f2e40115fbae3f
| 3,170
|
py
|
Python
|
stix/common/information_source.py
|
santosomar/python-stix
|
cf0ea6861d9fd4dec6003d948b6901cada954c4d
|
[
"BSD-3-Clause"
] | 4
|
2019-02-25T18:18:16.000Z
|
2020-12-19T06:23:28.000Z
|
stix/common/information_source.py
|
santosomar/python-stix
|
cf0ea6861d9fd4dec6003d948b6901cada954c4d
|
[
"BSD-3-Clause"
] | null | null | null |
stix/common/information_source.py
|
santosomar/python-stix
|
cf0ea6861d9fd4dec6003d948b6901cada954c4d
|
[
"BSD-3-Clause"
] | 1
|
2019-02-25T18:18:18.000Z
|
2019-02-25T18:18:18.000Z
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# external
from mixbox import fields
import cybox.common
from cybox.common.tools import ToolInformationList
# internal
import stix
import stix.bindings.stix_common as stix_common_binding
# relative
from .vocabs import VocabField
from .references import References
from .identity import Identity, IdentityFactory
from .structured_text import StructuredTextList
class InformationSource(stix.Entity):
    """Describes the source of a piece of STIX content.

    Maps to ``InformationSourceType`` from the STIX Common (v1) data model.
    The class-level attributes below are mixbox ``TypedField`` descriptors
    that drive (de)serialization — presumably via the mixbox entity
    machinery; verify against the mixbox documentation.
    """
    _binding = stix_common_binding
    _binding_class = stix_common_binding.InformationSourceType
    _namespace = 'http://stix.mitre.org/common-1'
    # Typed fields mirroring the child elements of InformationSourceType.
    # Note: contributing_sources uses a dotted string to avoid a forward
    # reference to ContributingSources, which is defined below this class.
    identity = fields.TypedField("Identity", type_=Identity, factory=IdentityFactory)
    descriptions = fields.TypedField("Description", StructuredTextList)
    contributing_sources = fields.TypedField("Contributing_Sources", type_="stix.common.information_source.ContributingSources")
    time = fields.TypedField("Time", cybox.common.Time)
    roles = VocabField("Role", multiple=True, key_name="roles")
    tools = fields.TypedField("Tools", ToolInformationList)
    references = fields.TypedField("References", References)
    def __init__(self, description=None, identity=None, time=None, tools=None, contributing_sources=None, references=None):
        """Initialize the source; ``description`` is wrapped in a StructuredTextList."""
        super(InformationSource, self).__init__()
        self.identity = identity
        self.descriptions = StructuredTextList(description)
        self.contributing_sources = contributing_sources
        self.time = time
        self.tools = tools
        self.references = references
        #self.roles = None
    def add_contributing_source(self, value):
        """Append ``value`` to the contributing sources collection."""
        self.contributing_sources.append(value)
    def add_reference(self, value):
        """Append a reference; falsy values are silently ignored."""
        if not value:
            return
        # TODO: Check if it's a valid URI?
        self.references.append(value)
    @property
    def description(self):
        """A single description about the contents or purpose of this object.

        Default Value: ``None``

        Note:
            If this object has more than one description set, this will return
            the description with the lowest ordinality value.

        Returns:
            An instance of :class:`.StructuredText`
        """
        return next(iter(self.descriptions), None)
    @description.setter
    def description(self, value):
        # Setting replaces the whole descriptions collection with one entry.
        from stix.common.structured_text import StructuredTextList
        self.descriptions = StructuredTextList(value)
    def add_description(self, description):
        """Adds a description to the ``descriptions`` collection.

        This is the same as calling "foo.descriptions.add(bar)".
        """
        self.descriptions.add(description)
    def add_role(self, value):
        """Append a role (a vocab value) to the roles collection."""
        self.roles.append(value)
class ContributingSources(stix.EntityList):
    """Collection of :class:`InformationSource` entries.

    Maps to ``ContributingSourcesType`` from the STIX Common (v1) data model.
    """
    _namespace = "http://stix.mitre.org/common-1"
    _binding = stix_common_binding
    _binding_class = stix_common_binding.ContributingSourcesType
    source = fields.TypedField("Source", InformationSource, multiple=True, key_name="sources")
    @classmethod
    def _dict_as_list(cls):
        # NOTE(review): presumably makes the dict form a {"sources": [...]}
        # mapping rather than a bare list — verify against stix.EntityList.
        return False
| 32.346939
| 128
| 0.712303
|
from mixbox import fields
import cybox.common
from cybox.common.tools import ToolInformationList
import stix
import stix.bindings.stix_common as stix_common_binding
from .vocabs import VocabField
from .references import References
from .identity import Identity, IdentityFactory
from .structured_text import StructuredTextList
class InformationSource(stix.Entity):
_binding = stix_common_binding
_binding_class = stix_common_binding.InformationSourceType
_namespace = 'http://stix.mitre.org/common-1'
identity = fields.TypedField("Identity", type_=Identity, factory=IdentityFactory)
descriptions = fields.TypedField("Description", StructuredTextList)
contributing_sources = fields.TypedField("Contributing_Sources", type_="stix.common.information_source.ContributingSources")
time = fields.TypedField("Time", cybox.common.Time)
roles = VocabField("Role", multiple=True, key_name="roles")
tools = fields.TypedField("Tools", ToolInformationList)
references = fields.TypedField("References", References)
def __init__(self, description=None, identity=None, time=None, tools=None, contributing_sources=None, references=None):
super(InformationSource, self).__init__()
self.identity = identity
self.descriptions = StructuredTextList(description)
self.contributing_sources = contributing_sources
self.time = time
self.tools = tools
self.references = references
def add_contributing_source(self, value):
self.contributing_sources.append(value)
def add_reference(self, value):
if not value:
return
self.references.append(value)
@property
def description(self):
return next(iter(self.descriptions), None)
@description.setter
def description(self, value):
from stix.common.structured_text import StructuredTextList
self.descriptions = StructuredTextList(value)
def add_description(self, description):
self.descriptions.add(description)
def add_role(self, value):
self.roles.append(value)
class ContributingSources(stix.EntityList):
_namespace = "http://stix.mitre.org/common-1"
_binding = stix_common_binding
_binding_class = stix_common_binding.ContributingSourcesType
source = fields.TypedField("Source", InformationSource, multiple=True, key_name="sources")
@classmethod
def _dict_as_list(cls):
return False
| true
| true
|
f7042f70611897f37c769bc82c9c072a8a0174f4
| 16,025
|
py
|
Python
|
django/utils/datastructures.py
|
graingert/django
|
784d0c261c76535dc760bc8d76793d92f35c1513
|
[
"BSD-3-Clause"
] | 1
|
2015-11-11T12:20:45.000Z
|
2015-11-11T12:20:45.000Z
|
django/utils/datastructures.py
|
graingert/django
|
784d0c261c76535dc760bc8d76793d92f35c1513
|
[
"BSD-3-Clause"
] | null | null | null |
django/utils/datastructures.py
|
graingert/django
|
784d0c261c76535dc760bc8d76793d92f35c1513
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
from types import GeneratorType
class MergeDict(object):
    """
    Read-only view over several dictionaries, consulted in order.

    When a key appears in more than one of the underlying dictionaries, the
    first dictionary containing it wins.  (Python 2-era code: the iter*
    methods call ``iteritems`` on the underlying dicts.)
    """
    def __init__(self, *dicts):
        self.dicts = dicts

    def __getitem__(self, key):
        for mapping in self.dicts:
            try:
                return mapping[key]
            except KeyError:
                pass
        raise KeyError

    def __copy__(self):
        return self.__class__(*self.dicts)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def getlist(self, key):
        # Delegates to the first underlying dict that has the key; the
        # underlying dicts are expected to provide getlist (MultiValueDict).
        for mapping in self.dicts:
            if key in mapping.keys():
                return mapping.getlist(key)
        return []

    def iteritems(self):
        # Yield each key once, honoring first-dict-wins precedence.
        seen = set()
        for mapping in self.dicts:
            for pair in mapping.iteritems():
                key, value = pair
                if key in seen:
                    continue
                seen.add(key)
                yield pair

    def iterkeys(self):
        for key, value in self.iteritems():
            yield key

    def itervalues(self):
        for key, value in self.iteritems():
            yield value

    def items(self):
        return list(self.iteritems())

    def keys(self):
        return list(self.iterkeys())

    def values(self):
        return list(self.itervalues())

    def has_key(self, key):
        for mapping in self.dicts:
            if key in mapping:
                return True
        return False

    __contains__ = has_key

    __iter__ = iterkeys

    def copy(self):
        """Return a new MergeDict over the same underlying dictionaries."""
        return self.__copy__()

    def __str__(self):
        """Render as a plain dict literal instead of the default object repr."""
        return str(dict(self.items()))

    def __repr__(self):
        """Render as ``MergeDict({...}, {...})`` listing the wrapped dicts."""
        pieces = ', '.join(repr(d) for d in self.dicts)
        return '%s(%s)' % (self.__class__.__name__, pieces)
class SortedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.

    NOTE(review): Python 2-era code — ``__init__`` assigns ``data.keys()``
    directly to ``keyOrder`` (a fresh list under Python 2, a view under
    Python 3) and ``update`` requires the argument to provide ``iteritems``.
    """
    def __new__(cls, *args, **kwargs):
        # keyOrder is created in __new__ so it exists even when __init__ is
        # bypassed (presumably during copying/unpickling — verify).
        instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
        instance.keyOrder = []
        return instance
    def __init__(self, data=None):
        if data is None:
            data = {}
        elif isinstance(data, GeneratorType):
            # Unfortunately we need to be able to read a generator twice. Once
            # to get the data into self with our super().__init__ call and a
            # second time to setup keyOrder correctly
            data = list(data)
        super(SortedDict, self).__init__(data)
        if isinstance(data, dict):
            self.keyOrder = data.keys()
        else:
            # Sequence of pairs: record first-occurrence order, skip repeats.
            self.keyOrder = []
            seen = set()
            for key, value in data:
                if key not in seen:
                    self.keyOrder.append(key)
                    seen.add(key)
    def __deepcopy__(self, memo):
        # Rebuild from ordered pairs so the copy preserves key order.
        return self.__class__([(key, copy.deepcopy(value, memo))
                               for key, value in self.iteritems()])
    def __copy__(self):
        # The Python's default copy implementation will alter the state
        # of self. The reason for this seems complex but is likely related to
        # subclassing dict.
        return self.copy()
    def __setitem__(self, key, value):
        # Only record ordering for keys not already present.
        if key not in self:
            self.keyOrder.append(key)
        super(SortedDict, self).__setitem__(key, value)
    def __delitem__(self, key):
        super(SortedDict, self).__delitem__(key)
        self.keyOrder.remove(key)
    def __iter__(self):
        return iter(self.keyOrder)
    def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result
    def popitem(self):
        result = super(SortedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result
    def items(self):
        # Pairs in insertion order (ordered keys zipped with ordered values).
        return zip(self.keyOrder, self.values())
    def iteritems(self):
        for key in self.keyOrder:
            yield key, self[key]
    def keys(self):
        # Return a copy so callers can't mutate the internal ordering list.
        return self.keyOrder[:]
    def iterkeys(self):
        return iter(self.keyOrder)
    def values(self):
        # Values in insertion order.
        return map(self.__getitem__, self.keyOrder)
    def itervalues(self):
        for key in self.keyOrder:
            yield self[key]
    def update(self, dict_):
        # NOTE(review): requires dict_ to provide iteritems() (Py2 dict or
        # another SortedDict); a Python 3 dict would fail here.
        for k, v in dict_.iteritems():
            self[k] = v
    def setdefault(self, key, default):
        if key not in self:
            self.keyOrder.append(key)
        return super(SortedDict, self).setdefault(key, default)
    def value_for_index(self, index):
        """Returns the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]
    def insert(self, index, key, value):
        """Inserts the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            # Removing an earlier entry shifts the target position left by one.
            if n < index:
                index -= 1
        self.keyOrder.insert(index, key)
        super(SortedDict, self).__setitem__(key, value)
    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        return self.__class__(self)
    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
    def clear(self):
        super(SortedDict, self).clear()
        self.keyOrder = []
class MultiValueDictKeyError(KeyError):
    """Raised by MultiValueDict.__getitem__ when the requested key is absent."""
    pass
class MultiValueDict(dict):
    """
    A dict subclass that keeps *every* value assigned to a key and can serve
    either the whole list or only the most recent entry.

    >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    >>> d['name']
    'Simon'
    >>> d.getlist('name')
    ['Adrian', 'Simon']
    >>> d.getlist('doesnotexist')
    []
    >>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
    ['Adrian', 'Simon']
    >>> d.get('lastname', 'nonexistent')
    'nonexistent'
    >>> d.setlist('lastname', ['Holovaty', 'Willison'])

    This exists because cgi.parse_qs returns a list for every key, even though
    most web forms submit single name-value pairs.
    """
    def __init__(self, key_to_list_mapping=()):
        super(MultiValueDict, self).__init__(key_to_list_mapping)

    def __repr__(self):
        plain = super(MultiValueDict, self).__repr__()
        return "<%s: %s>" % (self.__class__.__name__, plain)

    def __getitem__(self, key):
        """
        Return the last value stored for ``key`` ([] when the stored list is
        empty); raise MultiValueDictKeyError when the key is absent.
        """
        try:
            values = super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
        try:
            return values[-1]
        except IndexError:
            return []

    def __setitem__(self, key, value):
        # Plain assignment stores a one-element list.
        super(MultiValueDict, self).__setitem__(key, [value])

    def __copy__(self):
        # Copy each value list so the clone's lists are independent.
        return self.__class__([(key, values[:]) for key, values in self.lists()])

    def __deepcopy__(self, memo=None):
        memo = {} if memo is None else memo
        clone = self.__class__()
        memo[id(self)] = clone
        for key, value in dict.items(self):
            dict.__setitem__(clone, copy.deepcopy(key, memo),
                             copy.deepcopy(value, memo))
        return clone

    def __getstate__(self):
        state = self.__dict__.copy()
        state['_data'] = dict((key, self.getlist(key)) for key in self)
        return state

    def __setstate__(self, obj_dict):
        stored = obj_dict.pop('_data', {})
        for key, values in stored.items():
            self.setlist(key, values)
        self.__dict__.update(obj_dict)

    def get(self, key, default=None):
        """
        Return the last value for ``key``; fall back to ``default`` when the
        key is missing or its value list is empty.
        """
        try:
            value = self[key]
        except KeyError:
            return default
        return default if value == [] else value

    def getlist(self, key, default=None):
        """
        Return the full list of values for ``key``; fall back to ``default``
        (or [] when no default is given) for a missing key.
        """
        try:
            return super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            return [] if default is None else default

    def setlist(self, key, list_):
        super(MultiValueDict, self).__setitem__(key, list_)

    def setdefault(self, key, default=None):
        if key in self:
            return self[key]
        self[key] = default
        return default

    def setlistdefault(self, key, default_list=None):
        if key in self:
            return self.getlist(key)
        if default_list is None:
            default_list = []
        self.setlist(key, default_list)
        return default_list

    def appendlist(self, key, value):
        """Append ``value`` to the list stored under ``key``."""
        self.setlistdefault(key).append(value)

    def items(self):
        """Return (key, last-value) pairs."""
        return [(key, self[key]) for key in self.keys()]

    def iteritems(self):
        """Yield (key, last-value) pairs."""
        for key in self.keys():
            yield (key, self[key])

    def lists(self):
        """Return (key, value-list) pairs."""
        return super(MultiValueDict, self).items()

    def iterlists(self):
        """Yield (key, value-list) pairs."""
        return super(MultiValueDict, self).iteritems()

    def values(self):
        """Return the last value of every key's list."""
        return [self[key] for key in self.keys()]

    def itervalues(self):
        """Yield the last value of every key's list."""
        for key in self.iterkeys():
            yield self[key]

    def copy(self):
        """Return a shallow copy (value lists are duplicated via __copy__)."""
        return copy.copy(self)

    def update(self, *args, **kwargs):
        """
        Extend (rather than replace) existing key lists; accepts one optional
        positional MultiValueDict/dict plus keyword args.
        """
        if len(args) > 1:
            raise TypeError("update expected at most 1 arguments, got %d" % len(args))
        if args:
            other_dict = args[0]
            if isinstance(other_dict, MultiValueDict):
                for key, value_list in other_dict.lists():
                    self.setlistdefault(key).extend(value_list)
            else:
                try:
                    for key, value in other_dict.items():
                        self.setlistdefault(key).append(value)
                except TypeError:
                    raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
        for key, value in kwargs.iteritems():
            self.setlistdefault(key).append(value)

    def dict(self):
        """Return a plain dict mapping each key to its last value."""
        return dict((key, self[key]) for key in self)
class DotExpandedDict(dict):
    """
    Dict constructor that expands dotted keys into nested dictionaries:
    ``{'a.b': 1}`` becomes ``{'a': {'b': 1}}``.

    >>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
            'person.1.lastname': ['Willison'], \
            'person.2.firstname': ['Adrian'], \
            'person.2.lastname': ['Holovaty']})
    >>> d['person']['1']
    {'lastname': ['Willison'], 'firstname': ['Simon']}

    Gotcha: results are unpredictable when the dotted keys are "uneven",
    e.g. both 'c.1' and 'c' present.
    """
    def __init__(self, key_to_list_mapping):
        for dotted_key, value in key_to_list_mapping.items():
            node = self
            parts = dotted_key.split('.')
            # Walk/create the intermediate dicts for all but the last part.
            for part in parts[:-1]:
                node = node.setdefault(part, {})
            try:
                node[parts[-1]] = value
            except TypeError:
                # node turned out not to be a dict (uneven keys); replace it.
                node = {parts[-1]: value}
class ImmutableList(tuple):
    """
    Tuple subclass that raises a helpful error on any attempted mutation.

    Example::

        >>> a = ImmutableList(range(5), warning="You cannot mutate this.")
        >>> a[3] = '4'
        Traceback (most recent call last):
            ...
        AttributeError: You cannot mutate this.
    """
    def __new__(cls, *args, **kwargs):
        # The optional 'warning' keyword configures the error raised on
        # mutation; it must be stripped before tuple.__new__ sees kwargs.
        warning = kwargs.pop('warning', 'ImmutableList object is immutable.')
        instance = tuple.__new__(cls, *args, **kwargs)
        instance.warning = warning
        return instance

    def complain(self, *args, **kwargs):
        """Raise the configured warning (an Exception instance or message)."""
        if isinstance(self.warning, Exception):
            raise self.warning
        raise AttributeError(self.warning)

    # Every mutating operation is routed through complain().
    __delitem__ = complain
    __delslice__ = complain
    __iadd__ = complain
    __imul__ = complain
    __setitem__ = complain
    __setslice__ = complain
    append = complain
    extend = complain
    insert = complain
    pop = complain
    remove = complain
    sort = complain
    reverse = complain
class DictWrapper(dict):
    """
    Dict whose lookups for keys starting with ``prefix`` strip the prefix and
    pass the stored value through ``func`` before returning it.

    Used by the SQL construction code to ensure that values are correctly
    quoted before being used.
    """
    def __init__(self, data, func, prefix):
        super(DictWrapper, self).__init__(data)
        self.func = func
        self.prefix = prefix

    def __getitem__(self, key):
        """
        Look up ``key`` with any leading ``prefix`` removed; when the prefix
        was present, return ``func(value)`` instead of the raw value.
        """
        transform = key.startswith(self.prefix)
        if transform:
            key = key[len(self.prefix):]
        value = super(DictWrapper, self).__getitem__(key)
        return self.func(value) if transform else value
| 31.237817
| 131
| 0.566365
|
import copy
from types import GeneratorType
class MergeDict(object):
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in dict_.iteritems():
k, v = item
if k in seen:
continue
seen.add(k)
yield item
def iterkeys(self):
for k, v in self.iteritems():
yield k
def itervalues(self):
for k, v in self.iteritems():
yield v
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = iterkeys
def copy(self):
return self.__copy__()
def __str__(self):
return str(dict(self.items()))
def __repr__(self):
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
class SortedDict(dict):
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
elif isinstance(data, GeneratorType):
data = list(data)
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
seen = set()
for key, value in data:
if key not in seen:
self.keyOrder.append(key)
seen.add(key)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.iteritems()])
def __copy__(self):
# of self. The reason for this seems complex but is likely related to
# subclassing dict.
return self.copy()
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return map(self.__getitem__, self.keyOrder)
def itervalues(self):
for key in self.keyOrder:
yield self[key]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
return self[self.keyOrder[index]]
def insert(self, index, key, value):
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
return self.__class__(self)
def __repr__(self):
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key, default=None):
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
if default is None:
return []
return default
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return default
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
return default_list
return self.getlist(key)
def appendlist(self, key, value):
self.setlistdefault(key).append(value)
def items(self):
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
for key in self.keys():
yield (key, self[key])
def lists(self):
return super(MultiValueDict, self).items()
def iterlists(self):
return super(MultiValueDict, self).iteritems()
def values(self):
return [self[key] for key in self.keys()]
def itervalues(self):
for key in self.iterkeys():
yield self[key]
def copy(self):
return copy.copy(self)
def update(self, *args, **kwargs):
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key).append(value)
def dict(self):
return dict((key, self[key]) for key in self)
class DotExpandedDict(dict):
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
try:
current[bits[-1]] = v
except TypeError: current = {bits[-1]: v}
class ImmutableList(tuple):
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
| true
| true
|
f7042fc40e681680f30a61dd7dd41d217592fd03
| 5,291
|
py
|
Python
|
src/basic1.py
|
harika-24/Image-Processing-and-Machine-Learning-using-Parallel-Computing
|
b13b8f20551a9d5960b146713182b167e35d65e7
|
[
"MIT"
] | null | null | null |
src/basic1.py
|
harika-24/Image-Processing-and-Machine-Learning-using-Parallel-Computing
|
b13b8f20551a9d5960b146713182b167e35d65e7
|
[
"MIT"
] | null | null | null |
src/basic1.py
|
harika-24/Image-Processing-and-Machine-Learning-using-Parallel-Computing
|
b13b8f20551a9d5960b146713182b167e35d65e7
|
[
"MIT"
] | null | null | null |
import os
import sys
import dlib
import glob
import csv
import pickle as pp
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from sklearn import preprocessing
# from sklearn.model_selection import train_test_split
import webbrowser
from timeit import Timer
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
from time import time
import time
import multiprocessing
from flask import Flask, render_template, request
from PIL import Image
from elasticsearch import Elasticsearch
from tensorflow.python.keras._impl.keras.preprocessing.image import img_to_array
from twilio.rest import Client
from flask import Flask, render_template, request, url_for
app = Flask(__name__, template_folder='templates')
App_root=os.path.dirname("maintype")
@app.route("/knn")
def classify(try_vector): #CLASIFIER OPTION -A using KNN
start_time = time.time()
print("in classifier======================================================")
p_1=pp.load(open('model.p','rb'))
p_2=pp.load(open('model_1.p','rb'))
pred = p_1.predict([try_vector])
v = p_2.inverse_transform(pred)
print(p_2.inverse_transform(pred))
print("My program took", time.time() - start_time, "to run")
return v
def vector(destination, option):
    """
    Compute a 128-d dlib face descriptor for the image at *destination* and,
    when *option* is "KNN", classify it via :func:`classify`.

    Returns the decoded label from the classifier, or None when *option* is
    not "KNN".

    NOTE(review): only the descriptor of the *last* detected face is kept,
    and ``try_vector`` is unbound (NameError) when the image contains no
    faces — confirm whether single-face input is guaranteed upstream.

    Fix: removed a large commented-out Twilio SMS block that embedded a
    hard-coded account SID and auth token. Credentials must never live in
    source control, even commented out; rotate them and load from
    configuration if SMS alerts are reinstated.
    """
    predictor_path = "shape_predictor_5_face_landmarks.dat"
    face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"
    faces_folder_path ="/home/sethiamayank14/PycharmProjects/project2/src/"+destination
    detector = dlib.get_frontal_face_detector()
    sp = dlib.shape_predictor(predictor_path)
    facerec = dlib.face_recognition_model_v1(face_rec_model_path)
    img = dlib.load_rgb_image(faces_folder_path)
    dets = detector(img, 1)
    for k, d in enumerate(dets):
        # 5-point landmark alignment, then the ResNet descriptor (128 floats).
        shape = sp(img, d)
        face_descriptor = facerec.compute_face_descriptor(img, shape)
        try_vector = face_descriptor
    if option == "KNN":
        d = classify(try_vector)  # k-nearest-neighbour classification
        print(d)
        return d
@app.route("/") # this runs first
def index():
print("index working==================================")
return render_template("upload1.html")
@app.route("/upload", methods = ['POST'])
def upload():
# print("heyy========================")
target = os.path.join(App_root, "images/")
# print("hello")
if not os.path.isdir(target):
print("In here")
os.mkdir(target)
print("-----------------------",request.files.getlist("file"))
for file in request.files.getlist("file"):
filename = file.filename
destination ="".join([target, filename])
print(destination)
file.save(destination)
option = request.form['classifier']
print(option)
if( option == "KNN"):
name1 = vector(destination,option)
name1 = str(name1[0])
print(name1, type(name1))
f = open('helloworld.html', 'w')
# name = "Akash Bhaiya"
name = name1 + '.jpg'
print(name)
name2 = "/home/sethiamayank14/PycharmProjects/project2/src/images/"+ name
print(name2)
message = """<html>
<head></head>
<body>
<p>Your input image: </p>
<br>
<img src = "/home/sethiamayank14/PycharmProjects/project2/src/""" + destination + """"/>
<br>
<p>Standard Image:</p>
<br>
<img src = "/home/sethiamayank14/PycharmProjects/project2/src/images/""" + name + """"/>
<p> """ + name1 + """</p>
</body>
</html>"""
print(message)
f.write(message)
f.close()
# Change path to reflect file location
filename = 'helloworld.html'
webbrowser.open_new_tab(filename)
return name
# return name
if __name__== "__main__":
app.run(debug=True,port=5001,host='127.0.0.1')
| 31.682635
| 122
| 0.558307
|
import os
import sys
import dlib
import glob
import csv
import pickle as pp
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from sklearn import preprocessing
import webbrowser
from timeit import Timer
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
from time import time
import time
import multiprocessing
from flask import Flask, render_template, request
from PIL import Image
from elasticsearch import Elasticsearch
from tensorflow.python.keras._impl.keras.preprocessing.image import img_to_array
from twilio.rest import Client
from flask import Flask, render_template, request, url_for
app = Flask(__name__, template_folder='templates')
App_root=os.path.dirname("maintype")
@app.route("/knn")
def classify(try_vector): start_time = time.time()
print("in classifier======================================================")
p_1=pp.load(open('model.p','rb'))
p_2=pp.load(open('model_1.p','rb'))
pred = p_1.predict([try_vector])
v = p_2.inverse_transform(pred)
print(p_2.inverse_transform(pred))
print("My program took", time.time() - start_time, "to run")
return v
def vector(destination,option): predictor_path = "shape_predictor_5_face_landmarks.dat"
face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"
faces_folder_path ="/home/sethiamayank14/PycharmProjects/project2/src/"+destination
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
img = dlib.load_rgb_image(faces_folder_path)
dets = detector(img, 1)
for k, d in enumerate(dets):
shape = sp(img, d)
face_descriptor = facerec.compute_face_descriptor(img, shape)
try_vector=face_descriptor
if option == "KNN":
d = classify(try_vector) print(d)
return d
@app.route("/") def index():
print("index working==================================")
return render_template("upload1.html")
@app.route("/upload", methods = ['POST'])
def upload():
target = os.path.join(App_root, "images/")
if not os.path.isdir(target):
print("In here")
os.mkdir(target)
print("-----------------------",request.files.getlist("file"))
for file in request.files.getlist("file"):
filename = file.filename
destination ="".join([target, filename])
print(destination)
file.save(destination)
option = request.form['classifier']
print(option)
if( option == "KNN"):
name1 = vector(destination,option)
name1 = str(name1[0])
print(name1, type(name1))
f = open('helloworld.html', 'w')
name = name1 + '.jpg'
print(name)
name2 = "/home/sethiamayank14/PycharmProjects/project2/src/images/"+ name
print(name2)
message = """<html>
<head></head>
<body>
<p>Your input image: </p>
<br>
<img src = "/home/sethiamayank14/PycharmProjects/project2/src/""" + destination + """"/>
<br>
<p>Standard Image:</p>
<br>
<img src = "/home/sethiamayank14/PycharmProjects/project2/src/images/""" + name + """"/>
<p> """ + name1 + """</p>
</body>
</html>"""
print(message)
f.write(message)
f.close()
filename = 'helloworld.html'
webbrowser.open_new_tab(filename)
return name
if __name__== "__main__":
app.run(debug=True,port=5001,host='127.0.0.1')
| true
| true
|
f7042fdc0b2e66c421515786c31e873a156f7422
| 262
|
py
|
Python
|
dyn2sel/dcs_techniques/desdd_selection.py
|
luccaportes/Scikit-DYN2SEL
|
3e102f4fff5696277c57997fb811139c5e6f8b4d
|
[
"MIT"
] | 1
|
2021-08-21T21:21:29.000Z
|
2021-08-21T21:21:29.000Z
|
dyn2sel/dcs_techniques/desdd_selection.py
|
luccaportes/Scikit-DYN2SEL
|
3e102f4fff5696277c57997fb811139c5e6f8b4d
|
[
"MIT"
] | 10
|
2020-10-27T13:37:36.000Z
|
2021-09-11T02:40:51.000Z
|
dyn2sel/dcs_techniques/desdd_selection.py
|
luccaportes/Scikit-DYN2SEL
|
3e102f4fff5696277c57997fb811139c5e6f8b4d
|
[
"MIT"
] | 1
|
2021-11-24T07:20:42.000Z
|
2021-11-24T07:20:42.000Z
|
from dyn2sel.dcs_techniques import DCSTechnique
import numpy as np
from scipy.stats import mode
class DESDDSel(DCSTechnique):
def predict(self, ensemble, instances, real_labels=None):
return ensemble[ensemble.get_max_accuracy()].predict(instances)
| 29.111111
| 71
| 0.790076
|
from dyn2sel.dcs_techniques import DCSTechnique
import numpy as np
from scipy.stats import mode
class DESDDSel(DCSTechnique):
def predict(self, ensemble, instances, real_labels=None):
return ensemble[ensemble.get_max_accuracy()].predict(instances)
| true
| true
|
f7042fe61841ae00fa3573f79327e8f2bc2dcb99
| 1,421
|
py
|
Python
|
tests/tests/test_vm_coexist.py
|
jurobystricky/tdx-tools
|
c4eedb04a784fdfff724453499045ea6e369a818
|
[
"Apache-2.0"
] | 11
|
2021-12-21T01:32:59.000Z
|
2022-03-30T14:37:45.000Z
|
tests/tests/test_vm_coexist.py
|
jurobystricky/tdx-tools
|
c4eedb04a784fdfff724453499045ea6e369a818
|
[
"Apache-2.0"
] | 15
|
2022-01-12T00:40:59.000Z
|
2022-03-31T17:03:42.000Z
|
tests/tests/test_vm_coexist.py
|
jurobystricky/tdx-tools
|
c4eedb04a784fdfff724453499045ea6e369a818
|
[
"Apache-2.0"
] | 7
|
2021-12-20T11:45:46.000Z
|
2022-03-15T06:22:52.000Z
|
"""
This module provide the case to test the coexistance between TDX guest and non TD
guest. There are two types of non-TD guest:
1. Boot with legacy BIOS, it is default loader without pass "-loader" or "-bios"
option
2. Boot with OVMF UEFI BIOS, will boot with "-loader" => OVMFD.fd compiled from
the latest edk2 project.
"""
import logging
import pytest
from pycloudstack.vmparam import VM_TYPE_LEGACY, VM_TYPE_EFI, VM_TYPE_TD
__author__ = 'cpio'
LOG = logging.getLogger(__name__)
# pylint: disable=invalid-name
pytestmark = [
pytest.mark.vm_image("latest-guest-image"),
pytest.mark.vm_kernel("latest-guest-kernel"),
]
def test_tdguest_with_legacy_base(vm_factory):
"""
Test the different type VM run parallel
Test Steps
----------
1. Launch a TD guest
2. Launch a legacy guest
3. Launch an OVMF guest
"""
LOG.info("Create a TD guest")
td_inst = vm_factory.new_vm(VM_TYPE_TD, auto_start=True)
LOG.info("Create a legacy guest")
legacy_inst = vm_factory.new_vm(VM_TYPE_LEGACY, auto_start=True)
LOG.info("Create an OVMF guest")
efi_inst = vm_factory.new_vm(VM_TYPE_EFI, auto_start=True)
assert td_inst.wait_for_ssh_ready(), "Could not reach TD VM"
assert legacy_inst.wait_for_ssh_ready(), "Could not reach legacy VM"
assert efi_inst.wait_for_ssh_ready(), "Could not reach EFI VM"
| 29
| 82
| 0.695285
|
import logging
import pytest
from pycloudstack.vmparam import VM_TYPE_LEGACY, VM_TYPE_EFI, VM_TYPE_TD
__author__ = 'cpio'
LOG = logging.getLogger(__name__)
pytestmark = [
pytest.mark.vm_image("latest-guest-image"),
pytest.mark.vm_kernel("latest-guest-kernel"),
]
def test_tdguest_with_legacy_base(vm_factory):
LOG.info("Create a TD guest")
td_inst = vm_factory.new_vm(VM_TYPE_TD, auto_start=True)
LOG.info("Create a legacy guest")
legacy_inst = vm_factory.new_vm(VM_TYPE_LEGACY, auto_start=True)
LOG.info("Create an OVMF guest")
efi_inst = vm_factory.new_vm(VM_TYPE_EFI, auto_start=True)
assert td_inst.wait_for_ssh_ready(), "Could not reach TD VM"
assert legacy_inst.wait_for_ssh_ready(), "Could not reach legacy VM"
assert efi_inst.wait_for_ssh_ready(), "Could not reach EFI VM"
| true
| true
|
f70430b6d3a2a0dae8784dc1baf8f2c60b7a5d8d
| 2,002
|
py
|
Python
|
pre_commit_hooks/loaderon_hooks/tests/general_hooks/check_location_test.py
|
alvaroscelza/pre-commit-hooks
|
fc9a7a376dc733a1e3cc00b5ed35936bcb3c3b3b
|
[
"MIT"
] | null | null | null |
pre_commit_hooks/loaderon_hooks/tests/general_hooks/check_location_test.py
|
alvaroscelza/pre-commit-hooks
|
fc9a7a376dc733a1e3cc00b5ed35936bcb3c3b3b
|
[
"MIT"
] | null | null | null |
pre_commit_hooks/loaderon_hooks/tests/general_hooks/check_location_test.py
|
alvaroscelza/pre-commit-hooks
|
fc9a7a376dc733a1e3cc00b5ed35936bcb3c3b3b
|
[
"MIT"
] | null | null | null |
import sys
import pytest
from pre_commit_hooks.loaderon_hooks.tests.util.test_helpers import perform_test_on_file_expecting_result
from pre_commit_hooks.loaderon_hooks.general_hooks.check_location import main
@pytest.fixture(autouse=True)
def clean_sys_argv():
sys.argv = []
# Each line is a directory that allows certain types of files.
sys.argv.append('--directories')
sys.argv.append(r'.*\/xml')
sys.argv.append('--directories')
sys.argv.append(r'.*\/javascript')
# Each line specifies what types of files can be located inside the directory.
sys.argv.append('--files')
sys.argv.append(r'correct_xml.xml')
sys.argv.append('--files')
sys.argv.append(r'correct_js.js')
yield
def test_locations_ok_1():
perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main)
def test_locations_ok_2():
perform_test_on_file_expecting_result('check_location_samples/javascript/correct_js.js', main)
def test_locations_error1():
perform_test_on_file_expecting_result('check_location_samples/xml/incorrect_js.js', main, expected_result=2)
def test_locations_error2():
perform_test_on_file_expecting_result('check_location_samples/not_enabled_directory/incorrect_xml.xml', main, expected_result=2)
def test_locations_arguments_size_mismatch_error():
sys.argv = []
sys.argv.append('--directories')
sys.argv.append(r'.*\/xml')
# Lacking files for this directory
sys.argv.append('--directories')
sys.argv.append(r'.*\/javascript')
sys.argv.append('--files')
sys.argv.append(r'correct_xml.xml')
perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main, expected_result=2)
def test_locations_no_arguments_error():
sys.argv = []
with pytest.raises(TypeError) as error:
perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main)
assert "'NoneType' object is not iterable" in str(error.value)
| 31.777778
| 132
| 0.758741
|
import sys
import pytest
from pre_commit_hooks.loaderon_hooks.tests.util.test_helpers import perform_test_on_file_expecting_result
from pre_commit_hooks.loaderon_hooks.general_hooks.check_location import main
@pytest.fixture(autouse=True)
def clean_sys_argv():
sys.argv = []
sys.argv.append('--directories')
sys.argv.append(r'.*\/xml')
sys.argv.append('--directories')
sys.argv.append(r'.*\/javascript')
sys.argv.append('--files')
sys.argv.append(r'correct_xml.xml')
sys.argv.append('--files')
sys.argv.append(r'correct_js.js')
yield
def test_locations_ok_1():
perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main)
def test_locations_ok_2():
perform_test_on_file_expecting_result('check_location_samples/javascript/correct_js.js', main)
def test_locations_error1():
perform_test_on_file_expecting_result('check_location_samples/xml/incorrect_js.js', main, expected_result=2)
def test_locations_error2():
perform_test_on_file_expecting_result('check_location_samples/not_enabled_directory/incorrect_xml.xml', main, expected_result=2)
def test_locations_arguments_size_mismatch_error():
sys.argv = []
sys.argv.append('--directories')
sys.argv.append(r'.*\/xml')
sys.argv.append('--directories')
sys.argv.append(r'.*\/javascript')
sys.argv.append('--files')
sys.argv.append(r'correct_xml.xml')
perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main, expected_result=2)
def test_locations_no_arguments_error():
sys.argv = []
with pytest.raises(TypeError) as error:
perform_test_on_file_expecting_result('check_location_samples/xml/correct_xml.xml', main)
assert "'NoneType' object is not iterable" in str(error.value)
| true
| true
|
f704311c1696242df8f2316227f5b99a2b3d08b4
| 506
|
py
|
Python
|
Week1/Lecture2/Fexes/l2f1.py
|
MorbidValkyria/MIT6.0001x
|
3c80ffd50871387f560c2e820ad1fa05c61d9132
|
[
"MIT"
] | null | null | null |
Week1/Lecture2/Fexes/l2f1.py
|
MorbidValkyria/MIT6.0001x
|
3c80ffd50871387f560c2e820ad1fa05c61d9132
|
[
"MIT"
] | null | null | null |
Week1/Lecture2/Fexes/l2f1.py
|
MorbidValkyria/MIT6.0001x
|
3c80ffd50871387f560c2e820ad1fa05c61d9132
|
[
"MIT"
] | null | null | null |
"""
1) "a" + "bc" -> abc
2) 3 * "bc" -> bcbcbc
3) "3" * "bc" -> error as we can't use the * operator on two strings
4) abcd"[2] -> c (Just takes the character at index 2 in the string. a has index 0 and b index 1)
5) "abcd"[0:2] -> ab (Returns the substring from index 0 all the way to index n -1 in this case b)
6) "abcd"[:2] -> ab (Not giving a starting value to slice the string we start at 0)
7) "abcd"[2:] -> cd (When we don't give an end value it goes all the way to the end of the string)
"""
| 31.625
| 98
| 0.626482
| true
| true
|
|
f7043401959412943bac256ec0284c88028ab154
| 4,508
|
py
|
Python
|
configs/restorers/basicvsr/basicvsr_vimeo90k_bd.py
|
wangna11BD/mmediting
|
25410895914edc5938f526fc41b1776a36ac1b51
|
[
"Apache-2.0"
] | 1
|
2021-04-20T02:24:02.000Z
|
2021-04-20T02:24:02.000Z
|
configs/restorers/basicvsr/basicvsr_vimeo90k_bd.py
|
wangna11BD/mmediting
|
25410895914edc5938f526fc41b1776a36ac1b51
|
[
"Apache-2.0"
] | null | null | null |
configs/restorers/basicvsr/basicvsr_vimeo90k_bd.py
|
wangna11BD/mmediting
|
25410895914edc5938f526fc41b1776a36ac1b51
|
[
"Apache-2.0"
] | null | null | null |
exp_name = 'basicvsr_vimeo90k_bd'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRNet',
mid_channels=64,
num_blocks=30,
spynet_pretrained='pretrained_models/spynet.pth'),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR'], crop_border=0, convert_to='y')
# dataset settings
train_dataset_type = 'SRVimeo90KMultipleGTDataset'
val_dataset_type = 'SRTestMultipleGTDataset'
test_dataset_type = 'SRVimeo90KDataset'
train_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='PairedRandomCrop', gt_patch_size=256),
dict(
type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
dict(type='MirrorSequence', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
val_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
test_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='MirrorSequence', keys=['lq']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(samples_per_gpu=4, drop_last=True), # 2 gpus
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
# train
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/vimeo90k/BDx4',
gt_folder='data/vimeo90k/GT',
ann_file='data/vimeo90k/meta_info_Vimeo90K_train_GT.txt',
pipeline=train_pipeline,
scale=4,
test_mode=False)),
# val
val=dict(
type=val_dataset_type,
lq_folder='data/Vid4/BDx4',
gt_folder='data/Vid4/GT',
pipeline=val_pipeline,
scale=4,
test_mode=True),
# test
test=dict(
type=test_dataset_type,
lq_folder='data/vimeo90k/BDx4',
gt_folder='data/vimeo90k/GT',
ann_file='data/vimeo90k/meta_info_Vimeo90K_test_GT.txt',
pipeline=test_pipeline,
scale=4,
num_input_frames=7,
test_mode=True),
)
# optimizer
optimizers = dict(
generator=dict(
type='Adam',
lr=2e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.125)})))
# learning policy
total_iters = 300000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[300000],
restart_weights=[1],
min_lr=1e-7)
checkpoint_config = dict(interval=5, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non distributed training
evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 28.713376
| 79
| 0.618234
|
exp_name = 'basicvsr_vimeo90k_bd'
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRNet',
mid_channels=64,
num_blocks=30,
spynet_pretrained='pretrained_models/spynet.pth'),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR'], crop_border=0, convert_to='y')
train_dataset_type = 'SRVimeo90KMultipleGTDataset'
val_dataset_type = 'SRTestMultipleGTDataset'
test_dataset_type = 'SRVimeo90KDataset'
train_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='PairedRandomCrop', gt_patch_size=256),
dict(
type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
dict(type='MirrorSequence', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
val_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
test_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='MirrorSequence', keys=['lq']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(samples_per_gpu=4, drop_last=True), val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/vimeo90k/BDx4',
gt_folder='data/vimeo90k/GT',
ann_file='data/vimeo90k/meta_info_Vimeo90K_train_GT.txt',
pipeline=train_pipeline,
scale=4,
test_mode=False)),
val=dict(
type=val_dataset_type,
lq_folder='data/Vid4/BDx4',
gt_folder='data/Vid4/GT',
pipeline=val_pipeline,
scale=4,
test_mode=True),
test=dict(
type=test_dataset_type,
lq_folder='data/vimeo90k/BDx4',
gt_folder='data/vimeo90k/GT',
ann_file='data/vimeo90k/meta_info_Vimeo90K_test_GT.txt',
pipeline=test_pipeline,
scale=4,
num_input_frames=7,
test_mode=True),
)
optimizers = dict(
generator=dict(
type='Adam',
lr=2e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.125)})))
total_iters = 300000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[300000],
restart_weights=[1],
min_lr=1e-7)
checkpoint_config = dict(interval=5, save_optimizer=True, by_epoch=False)
evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
])
visual_config = None
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = None
resume_from = None
workflow = [('train', 1)]
| true
| true
|
f704346d161ef25a72528a244f15f8a8a9895a9f
| 1,531
|
py
|
Python
|
setup.py
|
dbradf/evgflip
|
5e7408d817ee1cb7823dd299b50d5959126756d4
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
dbradf/evgflip
|
5e7408d817ee1cb7823dd299b50d5959126756d4
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
dbradf/evgflip
|
5e7408d817ee1cb7823dd299b50d5959126756d4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from glob import glob
from os.path import basename
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='evgflip',
version='0.1.0',
license='Apache License, Version 2.0',
description='',
long_description=long_description,
long_description_content_type='text/markdown',
author='David Bradford',
author_email='david.bradford@mongodb.com',
url='https://github.com/dbradf/evgflip',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
install_requires=[
'boltons==19.1.0',
'Click==7.0',
'evergreen.py==0.5.0',
'PyYAML==5.4',
'structlog==19.1.0',
],
entry_points='''
[console_scripts]
evg-flip=evgflip.cli:main
''',
)
| 28.351852
| 74
| 0.636185
|
from __future__ import absolute_import
from __future__ import print_function
from glob import glob
from os.path import basename
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='evgflip',
version='0.1.0',
license='Apache License, Version 2.0',
description='',
long_description=long_description,
long_description_content_type='text/markdown',
author='David Bradford',
author_email='david.bradford@mongodb.com',
url='https://github.com/dbradf/evgflip',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
install_requires=[
'boltons==19.1.0',
'Click==7.0',
'evergreen.py==0.5.0',
'PyYAML==5.4',
'structlog==19.1.0',
],
entry_points='''
[console_scripts]
evg-flip=evgflip.cli:main
''',
)
| true
| true
|
f704356f4fb720f2bf93b959c1a2be1943a0b37d
| 2,002
|
py
|
Python
|
examples/devel/importing.py
|
markdoerr/pymol-open-source
|
b891b59ffaea812600648aa131ea2dbecd59a199
|
[
"CNRI-Python"
] | 2
|
2019-05-23T22:17:29.000Z
|
2020-07-03T14:36:22.000Z
|
examples/devel/importing.py
|
markdoerr/pymol-open-source
|
b891b59ffaea812600648aa131ea2dbecd59a199
|
[
"CNRI-Python"
] | null | null | null |
examples/devel/importing.py
|
markdoerr/pymol-open-source
|
b891b59ffaea812600648aa131ea2dbecd59a199
|
[
"CNRI-Python"
] | null | null | null |
# This is an example of firing up PyMOL inside of a subordinate
# process via an "import pymol"
#
# NOTE: for this to work, PyMOL must be installed in a
# Python-dependent fashion (e.g. pymol-0_98-bin-win32-py23) etc.
#
# WARNING: stability issues have been known to occur with this
# approach, so anticipate problems...take-down is messy.
#
# WARNING: Right now, there is no way for the main process to know
# when PyMOL is actually initialized and ready to go, so we simply
# sleep a second after importing.
import string
import __main__
# note that passing in a "-z" option would keep the window hidden
# until you called pymol.cmd.window("show").
__main__.pymol_argv= string.split("pymol -qxiF -X 300 -Y 100 -H 400 -W 400")
import pymol
# give PyMOL enough time to initialize (we need to find a safe and
# robust alternative to this stupid delay especially since the
# pymol.finish_launching() method now seems to be broken)
import time
time.sleep(1)
# put up some content
if 1:
pymol.cmd.set("sweep_mode",3)
pymol.cmd.rock()
pymol.cmd.turn("x",180)
pymol.cmd.load("$TUT/1hpv.pdb")
pymol.preset.pretty("1hpv")
pymol.cmd.orient()
pymol.cmd.turn("y",85)
pymol.cmd.zoom("all",20)
pymol.cmd.orient("organic & e. N+O",animate=10)
pymol.cmd.show("sticks","organic")
# play peek-a-boo with the window
if 1:
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
# now quit
print("Quitting...")
time.sleep(1)
print("3...")
time.sleep(1)
print("2...")
time.sleep(1)
print("1...")
time.sleep(1)
print("Die!")
# note, we cannot let the main thread terminate without first calling
# pymol.cmd.quit() which will take-down PyMOL
pymol.cmd.quit()
| 24.414634
| 77
| 0.67982
|
import string
import __main__
__main__.pymol_argv= string.split("pymol -qxiF -X 300 -Y 100 -H 400 -W 400")
import pymol
import time
time.sleep(1)
if 1:
pymol.cmd.set("sweep_mode",3)
pymol.cmd.rock()
pymol.cmd.turn("x",180)
pymol.cmd.load("$TUT/1hpv.pdb")
pymol.preset.pretty("1hpv")
pymol.cmd.orient()
pymol.cmd.turn("y",85)
pymol.cmd.zoom("all",20)
pymol.cmd.orient("organic & e. N+O",animate=10)
pymol.cmd.show("sticks","organic")
if 1:
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
print("Quitting...")
time.sleep(1)
print("3...")
time.sleep(1)
print("2...")
time.sleep(1)
print("1...")
time.sleep(1)
print("Die!")
pymol.cmd.quit()
| true
| true
|
f70435e6588b6eff0658bd07e3715657ae154bef
| 387
|
py
|
Python
|
algorithms/recursion/sum_of_sequence.py
|
zhijunsheng/tictactoe-py
|
648bed3bbf56d441805d472c73b7951b73469f20
|
[
"MIT"
] | null | null | null |
algorithms/recursion/sum_of_sequence.py
|
zhijunsheng/tictactoe-py
|
648bed3bbf56d441805d472c73b7951b73469f20
|
[
"MIT"
] | null | null | null |
algorithms/recursion/sum_of_sequence.py
|
zhijunsheng/tictactoe-py
|
648bed3bbf56d441805d472c73b7951b73469f20
|
[
"MIT"
] | null | null | null |
import unittest
def linear_sum(S, n):
"""Return the sum of the first n numbers of sequence S."""
if n == 0:
return 0
else:
return linear_sum(S, n - 1) + S[n - 1]
class TestLinearSum(unittest.TestCase):
def test_linear_sum(self):
S = [4, 3, 6, 2, 8]
self.assertEqual(23, linear_sum(S, 5))
if __name__ == '__main__':
unittest.main()
| 21.5
| 62
| 0.589147
|
import unittest
def linear_sum(S, n):
if n == 0:
return 0
else:
return linear_sum(S, n - 1) + S[n - 1]
class TestLinearSum(unittest.TestCase):
def test_linear_sum(self):
S = [4, 3, 6, 2, 8]
self.assertEqual(23, linear_sum(S, 5))
if __name__ == '__main__':
unittest.main()
| true
| true
|
f704376b4a39e532d7296da675a5c7c10f97297a
| 593
|
py
|
Python
|
polls/urls.py
|
FrankCasanova/poll-django
|
4df8889d2802cd211a993d5de43f663cd6ef9a30
|
[
"MIT"
] | null | null | null |
polls/urls.py
|
FrankCasanova/poll-django
|
4df8889d2802cd211a993d5de43f663cd6ef9a30
|
[
"MIT"
] | null | null | null |
polls/urls.py
|
FrankCasanova/poll-django
|
4df8889d2802cd211a993d5de43f663cd6ef9a30
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
#here are our app-connections.(these connection just affect to our app, not at entire system)
#each connection going us to a view functionality
#these connections needs to be connect with url root, because that's where the requests come from
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/result/', views.ResultView.as_view(), name='result'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
| 34.882353
| 97
| 0.70489
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/result/', views.ResultView.as_view(), name='result'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
| true
| true
|
f70438d8f78b8f084550f654f4578c7326e7838c
| 2,120
|
py
|
Python
|
classes/menu.py
|
howard-2718/untitled_rpg
|
49654afbfb548676df5d72d35e47b9e06eefa7a7
|
[
"MIT"
] | null | null | null |
classes/menu.py
|
howard-2718/untitled_rpg
|
49654afbfb548676df5d72d35e47b9e06eefa7a7
|
[
"MIT"
] | null | null | null |
classes/menu.py
|
howard-2718/untitled_rpg
|
49654afbfb548676df5d72d35e47b9e06eefa7a7
|
[
"MIT"
] | null | null | null |
"""
Menu handling file
- Every menu is of the Menu class
- Menus are initialized with an array of options
- What a menu option does is determined by the following table:
- "set_state_map": s.set_state('map')
- "exit": exit()
"""
from config import *
import sys
class Menu:
    """A selectable option menu with a wrap-around 1-based cursor.

    Holds the option labels shown to the player and the parallel list of
    action strings executed by decision(). An option's action is matched
    by the table in the module docstring ("set_state_map", "exit").
    """

    def __init__(self, options, sel_index, results):
        self.options = options        # option labels (list of str)
        self.results = results        # action strings (list of str)
        self._sel_index = sel_index   # 1-based cursor position
        self.first_print = True       # True until the first full render

    @property
    def sel_index(self):
        """Current 1-based selection index."""
        return self._sel_index

    @sel_index.setter
    def sel_index(self, value):
        # Wrap past either end so arrow-key navigation cycles.
        count = len(self.options)
        if value > count:
            self._sel_index = 1
        elif value < 1:
            self._sel_index = count
        else:
            self._sel_index = value

    @sel_index.deleter
    def sel_index(self):
        del self._sel_index

    def print_menu_center(self):
        """Render the menu centered on screen, erasing the previous render."""
        height = len(self.options) + 1
        if not self.first_print:
            # Move back over the previously drawn menu and blank it out.
            print(t.move_up(height))
            for _ in range(height):
                print(t.clear_eol)
            print(t.move_up(height + 1))
        for row, label in enumerate(self.options, start=1):
            prefix = "> " if self.sel_index == row else ""
            print(t.center(prefix + str(row) + ". " + label))
        self.first_print = False

    def battle_menu(self):
        """Return the menu rows as plain strings (for the battle screen)."""
        return [
            ("> " if self.sel_index == row else "") + str(row) + ". " + label
            for row, label in enumerate(self.options, start=1)
        ]

    def decision(self):
        """Execute the action bound to the current selection."""
        action = self.results[self.sel_index - 1]
        if action == "set_state_map":
            s.set_state('map')
        elif action == "exit":
            sys.exit()
| 26.17284
| 71
| 0.556132
|
from config import *
import sys
class Menu:
def __init__(self, options, sel_index, results):
self.options = options self.results = results self._sel_index = sel_index
self.first_print = True
@property
def sel_index(self):
return self._sel_index
@sel_index.setter
def sel_index(self, value):
length = len(self.options)
if value > length:
self._sel_index = 1
elif value < 1:
self._sel_index = length
else:
self._sel_index = value
@sel_index.deleter
def sel_index(self):
del self._sel_index
def print_menu_center(self):
if not self.first_print:
print(t.move_up(len(self.options) + 1))
for _ in range(len(self.options) + 1):
print(t.clear_eol)
print(t.move_up(len(self.options) + 2))
count = 1
for option in self.options:
if self.sel_index == count:
print(t.center("> " + str(count) + ". " + option))
else:
print(t.center(str(count) + ". " + option))
count += 1
self.first_print = False
def battle_menu(self):
output = []
count = 1
for option in self.options:
if self.sel_index == count:
output.append("> " + str(count) + ". " + option)
else:
output.append(str(count) + ". " + option)
count += 1
return output
def decision(self):
choice = self.results[(self.sel_index-1)]
if choice == "set_state_map":
s.set_state('map')
elif choice == "exit":
sys.exit()
| true
| true
|
f70439ea5e1c811be29ccac4a2c1991c2f496ec6
| 128,492
|
py
|
Python
|
third_party/ply/ply/yacc.py
|
albertobarri/idk
|
a250884f79e2a484251fc750bb915ecbc962be58
|
[
"MIT"
] | 9,724
|
2015-01-01T02:06:30.000Z
|
2019-01-17T15:13:51.000Z
|
third_party/ply/yacc.py
|
1065672644894730302/Chromium
|
239dd49e906be4909e293d8991e998c9816eaa35
|
[
"BSD-3-Clause"
] | 7,584
|
2019-01-17T22:58:27.000Z
|
2022-03-31T23:10:22.000Z
|
third_party/ply/yacc.py
|
1065672644894730302/Chromium
|
239dd49e906be4909e293d8991e998c9816eaa35
|
[
"BSD-3-Clause"
] | 1,519
|
2015-01-01T18:11:12.000Z
|
2019-01-17T14:16:02.000Z
|
# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammer is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expensive of readability and what might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2" # Table version
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates a
# a 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility function for python 2.6/3.0
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# Compatibility
try:
MAXINT = sys.maxint
except AttributeError:
MAXINT = sys.maxsize
# Python 2.x/3.0 compatibility.
def load_ply_lex():
    """Import and return the lex module appropriate for this Python version."""
    if sys.version_info[0] < 3:
        import lex
    else:
        import ply.lex as lex
    return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
    """Minimal logging-module stand-in that writes %-formatted lines to a stream.

    PLY uses this by default (e.g. for the parser.out file) when the caller
    does not supply a real logging object.
    """
    def __init__(self, f):
        self.f = f  # any object with a write() method

    def _emit(self, prefix, msg, args):
        # Shared formatting path for every level.
        self.f.write(prefix + (msg % args) + "\n")

    def debug(self, msg, *args, **kwargs):
        self._emit("", msg, args)

    info = debug

    def warning(self, msg, *args, **kwargs):
        self._emit("WARNING: ", msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit("ERROR: ", msg, args)

    # NOTE: in the original API 'critical' aliases debug, not error.
    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger stand-in that silently discards everything.

    Every attribute access and every call returns the instance itself, so
    arbitrary chains like ``log.debug(...)`` are harmless no-ops.
    """
    def __getattribute__(self,name):
        # Any attribute lookup (debug, info, warning, ...) yields self.
        return self
    def __call__(self,*args,**kwargs):
        return self
# Exception raised for yacc-related errors
class YaccError(Exception): pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    """Render a reduction result for debug-mode trace output.

    The value's type name and id are always shown; its repr is re-escaped
    if it contains newlines and truncated to the module-level
    ``resultlimit`` setting.
    """
    shown = repr(r)
    if "\n" in shown:
        # Re-repr so embedded newlines become escape sequences.
        shown = repr(shown)
    if len(shown) > resultlimit:
        shown = shown[:resultlimit] + " ..."
    return "<%s @ 0x%x> (%s)" % (type(r).__name__, id(r), shown)
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Render a parser-stack value for debug output.

    Short reprs (under 16 characters, newlines re-escaped) are returned
    verbatim; anything longer collapses to "<typename @ 0xaddr>".
    """
    text = repr(r)
    if "\n" in text:
        # Re-repr so embedded newlines become escape sequences.
        text = repr(text)
    if len(text) >= 16:
        return "<%s @ 0x%x>" % (type(r).__name__, id(r))
    return text
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    """Grammar symbol held on the parser stack.

    Attributes are set dynamically by the parse loop: .type, .value, and
    optionally .lineno/.endlineno and .lexpos/.endlexpos.
    """
    def __str__(self): return self.type
    def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper passed to each grammar rule function as its ``p`` argument.

    Indexing reads/writes the .value of the underlying YaccSymbol objects:
    non-negative indices address the current rule's slice, negative indices
    reach back into the parser's symbol stack.
    """
    def __init__(self,s,stack=None):
        self.slice = s          # YaccSymbol objects for the current rule
        self.stack = stack      # parser symbol stack (for negative indices)
        self.lexer = None
        self.parser= None
    def __getitem__(self,n):
        if n >= 0: return self.slice[n].value
        else: return self.stack[n].value
    def __setitem__(self,n,v):
        self.slice[n].value = v
    def __getslice__(self,i,j):
        # Python 2 slice protocol: p[i:j] yields the raw values.
        return [s.value for s in self.slice[i:j]]
    def __len__(self):
        return len(self.slice)
    def lineno(self,n):
        """Return the line number of item n (0 if not recorded)."""
        return getattr(self.slice[n],"lineno",0)
    def set_lineno(self,n,lineno):
        self.slice[n].lineno = lineno
    def linespan(self,n):
        """Return (startline, endline) for item n."""
        startline = getattr(self.slice[n],"lineno",0)
        endline = getattr(self.slice[n],"endlineno",startline)
        return startline,endline
    def lexpos(self,n):
        """Return the lexing position of item n (0 if not recorded)."""
        return getattr(self.slice[n],"lexpos",0)
    def lexspan(self,n):
        """Return (startpos, endpos) of lex positions for item n."""
        startpos = getattr(self.slice[n],"lexpos",0)
        endpos = getattr(self.slice[n],"endlexpos",startpos)
        return startpos,endpos
    def error(self):
        # Raising SyntaxError forces the parser into error recovery.
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    def __init__(self,lrtab,errorf):
        # Cache the LR table pieces and the user's p_error callback.
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf
    def errok(self):
        # Called from within a p_error handler: signal that error
        # recovery has succeeded and normal parsing may resume.
        self.errorok = 1
    def restart(self):
        """Reset both parser stacks to the start state ($end at the bottom)."""
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)
    def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Dispatch to the debug, tracking, or fully optimized parse loop.

        The three implementations are duplicates of the same engine; the
        optimized variants simply omit debug logging and/or position
        tracking. An int 'debug' flag is promoted to a PlyLogger on stderr.
        """
        if debug or yaccdevel:
            if isinstance(debug,int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
        elif tracking:
            return self.parseopt(input,lexer,debug,tracking,tokenfunc)
        else:
            return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. For the non-debugging version,
# copy this code to a method parseopt() and delete all of the sections
# enclosed in:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# --! DEBUG
debug.info("PLY: PARSE DEBUG START")
# --! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = "$end"
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
# --! DEBUG
debug.debug('')
debug.debug('State : %s', state)
# --! DEBUG
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = "$end"
# --! DEBUG
debug.debug('Stack : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
# --! DEBUG
debug.debug("Action : Shift and goto state %s", t)
# --! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
# --! DEBUG
if plen:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
else:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
# --! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n,"value",None)
# --! DEBUG
debug.info("Done : Returning %s", format_result(result))
debug.info("PLY: PARSE DEBUG END")
# --! DEBUG
return result
if t == None:
# --! DEBUG
debug.error('Error : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == "$end":
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != "$end":
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == "$end":
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
# Edit the debug version above, then copy any modifications to the method
# below while removing #--! DEBUG sections.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
# code in the #--! TRACKING sections
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re

# Regex used to validate grammar rule and symbol names: letters, digits,
# underscore and hyphen only (anchored to the whole string).
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
    """Raw information about a single grammar rule.

    A rule such as ``expr : expr PLUS term`` is stored with its name
    ('expr'), its right-hand-side symbols, a precedence tuple, a unique
    sequence number, and the reduction function together with the file
    and line where that function was defined.
    """

    # Count of reductions performed with this production (updated by the engine).
    reduced = 0

    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence

        # Number of right-hand-side symbols (used heavily during table construction).
        self.len = len(self.prod)

        # Unique right-hand-side symbols, kept in first-seen order.
        self.usyms = []
        for sym in self.prod:
            if sym not in self.usyms:
                self.usyms.append(sym)

        # LR item bookkeeping, filled in later by build_lritems().
        self.lr_items = []
        self.lr_next = None

        # Human-readable form, e.g. "expr -> expr PLUS term".
        if self.prod:
            self.str = "%s -> %s" % (self.name, " ".join(self.prod))
        else:
            self.str = "%s -> <empty>" % self.name

    def __str__(self):
        return self.str

    def __repr__(self):
        return "Production(" + str(self) + ")"

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        # Python 2 truth protocol: a production is always truthy.
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    def lr_item(self, n):
        """Return the LRItem with the dot at position *n*, or None past the end."""
        if n > len(self.prod):
            return None
        p = LRItem(self, n)
        # Precompute the productions that may appear right after the dot.
        # NOTE(review): relies on a module-global Prodnames mapping (a known
        # hack in this file) — remove once Grammar owns the lookup.
        try:
            p.lr_after = Prodnames[p.prod[n + 1]]
        except (IndexError, KeyError):
            p.lr_after = []
        try:
            p.lr_before = p.prod[n - 1]
        except IndexError:
            p.lr_before = None
        return p

    def bind(self, pdict):
        """Resolve the stored function name to a callable found in *pdict*."""
        if self.func:
            self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Lightweight stand-in for Production when tables are loaded from files.

    Carries only the fields the LR engine consults at parse time, plus a
    preformatted string for debugging output.
    """

    def __init__(self, str, name, len, func, file, line):
        # NOTE: parameter names shadow the builtins 'str' and 'len'; they are
        # kept because stored table rows are unpacked positionally into here.
        self.str = str
        self.name = name
        self.len = len
        self.func = func
        self.file = file
        self.line = line
        self.callable = None

    def __str__(self):
        return self.str

    def __repr__(self):
        return "MiniProduction(%s)" % self.str

    def bind(self, pdict):
        """Resolve the stored function name to a callable from *pdict*."""
        if self.func:
            self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
    """One dotted position within a production.

    For ``expr : expr . PLUS term`` the "." marks how far the parse has
    progressed.  Built from a Production *p* and a dot position *n*.
    """

    def __init__(self, p, n):
        self.name = p.name
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}          # LALR lookaheads, keyed by state number
        rhs = list(p.prod)
        rhs.insert(n, ".")            # splice the dot into the right-hand side
        self.prod = tuple(rhs)
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if not self.prod:
            return "%s -> <empty>" % self.name
        return "%s -> %s" % (self.name, " ".join(self.prod))

    def __repr__(self):
        return "LRItem(" + str(self) + ")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the rightmost member of *symbols* found in *terminals*, else None.

    Used by add_production() to derive a rule's default precedence.
    """
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
    """Raised when a defect is detected in the grammar specification."""
    pass
class Grammar(object):
    """Contents of a grammar specification plus derived properties.

    Holds the productions, terminal/nonterminal bookkeeping, precedence
    rules, and the computed FIRST/FOLLOW sets and LR items that drive
    table generation.
    """
    def __init__(self,terminals):
        self.Productions = [None]      # A list of all of the productions.  The first
                                       # entry is always reserved for the purpose of
                                       # building an augmented grammar

        self.Prodnames = { }           # A dictionary mapping the names of nonterminals to a list of all
                                       # productions of that nonterminal.

        self.Prodmap = { }             # A dictionary that is only used to detect duplicate
                                       # productions.

        self.Terminals = { }           # A dictionary mapping the names of terminal symbols to a
                                       # list of the rules where they are used.

        for term in terminals:
            self.Terminals[term] = []

        self.Terminals['error'] = []   # 'error' is an implicit terminal used for error recovery

        self.Nonterminals = { }        # A dictionary mapping names of nonterminals to a list
                                       # of rule numbers where they are used.

        self.First = { }               # A dictionary of precomputed FIRST(x) symbols

        self.Follow = { }              # A dictionary of precomputed FOLLOW(x) symbols

        self.Precedence = { }          # Precedence rules for each terminal. Contains tuples of the
                                       # form ('right',level) or ('nonassoc', level) or ('left',level)

        self.UsedPrecedence = { }      # Precedence rules that were actually used by the grammar.
                                       # This is only used to provide error checking and to generate
                                       # a warning about unused precedence rules.

        self.Start = None              # Starting symbol for the grammar

    def __len__(self):
        return len(self.Productions)

    def __getitem__(self,index):
        return self.Productions[index]

    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'.  level is a numeric level.
    # -----------------------------------------------------------------------------
    def set_precedence(self,term,assoc,level):
        """Record precedence for *term*.  Must precede all add_production() calls."""
        assert self.Productions == [None],"Must call set_precedence() before add_production()"
        if term in self.Precedence:
            raise GrammarError("Precedence already specified for terminal '%s'" % term)
        if assoc not in ['left','right','nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc,level)

    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols.   For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most non-terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------
    def add_production(self,prodname,syms,func=None,file='',line=0):
        """Validate and register one production rule; returns 0 on success.

        Raises GrammarError for illegal names, bad literals, misplaced
        %prec, unknown precedence names, or duplicate rules.
        """
        if prodname in self.Terminals:
            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
        if prodname == 'error':
            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))

        # Look for literal tokens (quoted single-character symbols).
        for n,s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    # eval() turns the quoted source text into the literal
                    # character; it only ever sees a quoted string here.
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                    if not c in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
            if syms[-2] != '%prec':
                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname,None)
            if not prodprec:
                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
            else:
                self.UsedPrecedence[precname] = 1
            del syms[-2:]     # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms,self.Terminals)
            prodprec = self.Precedence.get(precname,('right',0))

        # See if the rule is already in the rulemap
        # (NOTE: 'map' shadows the builtin; kept for byte-compatibility.)
        map = "%s -> %s" % (prodname,syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                               "Previous definition at %s:%d" % (m.file, m.line))

        # From this point on, everything is valid.  Create a new Production instance
        pnumber = len(self.Productions)
        if not prodname in self.Nonterminals:
            self.Nonterminals[prodname] = [ ]

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if not t in self.Nonterminals:
                    self.Nonterminals[t] = [ ]
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber,prodname,syms,prodprec,func,file,line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [ p ]
        return 0

    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar.  Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------
    def set_start(self,start=None):
        """Install *start* (default: first rule's name) and build rule 0."""
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError("start symbol %s undefined" % start)
        self.Productions[0] = Production(0,"S'",[start])
        self.Nonterminals[start].append(0)
        self.Start = start

    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol.  Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------
    def find_unreachable(self):
        """Return nonterminals unreachable from the start symbol (recursive mark)."""

        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if reachable[s]:
                # We've already reached symbol s.
                return
            reachable[s] = 1
            for p in self.Prodnames.get(s,[]):
                for r in p.prod:
                    mark_reachable_from(r)

        reachable = { }
        for s in list(self.Terminals) + list(self.Nonterminals):
            reachable[s] = 0

        mark_reachable_from( self.Productions[0].prod[0] )

        return [s for s in list(self.Nonterminals)
                if not reachable[s]]

    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------
    def infinite_cycles(self):
        """Return symbols that can never derive a terminal-only string.

        Fixed-point computation: a nonterminal "terminates" once some
        production of it has every right-hand-side symbol terminating.
        """
        terminates = {}

        # Terminals:
        for t in self.Terminals:
            terminates[t] = 1

        terminates['$end'] = 1

        # Nonterminals:

        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = 0

        # Then propagate termination until no change:
        while 1:
            some_change = 0
            for (n,pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = 0
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = 1

                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = 1
                            some_change = 1
                        # Don't need to consider any more productions for this n.
                        break

            if not some_change:
                break

        infinite = []
        for (s,term) in terminates.items():
            if not term:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)

        return infinite

    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used the grammar, but not defined as tokens or
    # grammar rules.  Returns a list of tuples (sym, prod) where sym in the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        """Return (symbol, production) pairs for symbols never defined anywhere."""
        result = []
        for p in self.Productions:
            if not p: continue

            for s in p.prod:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    result.append((s,p))
        return result

    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar.  Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        """Return terminals that appear in no production (excluding 'error')."""
        unused_tok = []
        for s,v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)

        return unused_tok

    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined,  but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------
    def unused_rules(self):
        """Return one representative Production per never-referenced nonterminal."""
        unused_prod = []
        for s,v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar.  term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------
    def unused_precedence(self):
        """Return (terminal, assoc) pairs whose precedence declaration went unused."""
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname,self.Precedence[termname][0]))

        return unused

    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self,beta):
        """Return FIRST(beta) for a symbol sequence, including '<empty>' if nullable."""

        # We are computing First(x1,x2,x3,...,xn)
        result = [ ]
        for x in beta:
            x_produces_empty = 0

            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = 1
                else:
                    if f not in result: result.append(f)

            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')

        return result

    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------
    def compute_first(self):
        """Compute and cache FIRST sets for every symbol; returns self.First."""
        if self.First:
            return self.First

        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]

        self.First['$end'] = ['$end']

        # Nonterminals:

        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []

        # Then propagate symbols until no change:
        while 1:
            some_change = 0
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append( f )
                            some_change = 1
            if not some_change:
                break

        return self.First

    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol.  The
    # follow set is the set of all symbols that might follow a given
    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------
    def compute_follow(self,start=None):
        """Compute and cache FOLLOW sets for every nonterminal; returns self.Follow."""
        # If already computed, return the result
        if self.Follow:
            return self.Follow

        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()

        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = [ ]

        if not start:
            start = self.Productions[1].name

        self.Follow[start] = [ '$end' ]

        while 1:
            didadd = 0
            for p in self.Productions[1:]:
                # Here is the production set
                for i in range(len(p.prod)):
                    B = p.prod[i]
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = 0
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
                            if f == '<empty>':
                                hasempty = 1
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = 1
            if not didadd: break
        return self.Follow

    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items.  The LR items are stored in two ways:  First, they are uniquely
    # numbered and placed in the list _lritems.  Second, a linked list of LR items
    # is built for each production.  For example:
    #
    #   E -> E PLUS E
    #
    # Creates the list
    #
    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------
    def build_lritems(self):
        """Build the per-production linked lists of LRItem objects."""
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while 1:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p,i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError,KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None

                lastlri.lr_next = lri
                if not lri: break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
    """Raised when a stored parser table was written by a different yacc version."""
    pass
class LRTable(object):
    """Basic container of LR parsing tables (action, goto, productions).

    Table *generation* lives in the derived LRGeneratedTable; this class
    only loads previously generated tables from a module or a pickle file.
    """
    def __init__(self):
        self.lr_action = None        # Action table (state -> {token: action})
        self.lr_goto = None          # Goto table
        self.lr_productions = None   # List of MiniProduction objects
        self.lr_method = None        # 'SLR' or 'LALR'

    def read_table(self,module):
        """Load tables from an already-imported module or a module name.

        Returns the stored grammar signature; raises VersionError if the
        table file was produced by an incompatible version.
        """
        if isinstance(module,types.ModuleType):
            parsetab = module
        else:
            if sys.version_info[0] < 3:
                # Python 2: exec binds 'parsetab' directly in this scope.
                exec("import %s as parsetab" % module)
            else:
                # Python 3: exec can't rebind locals, so import into a dict.
                env = { }
                exec("import %s as parsetab" % module, env, env)
                parsetab = env['parsetab']

        if parsetab._tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto

        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))

        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self,filename):
        """Load tables from a pickle file written by the generator.

        The load order below must match the write order exactly:
        version, method, signature, action, goto, productions.
        """
        try:
            import cPickle as pickle
        except ImportError:
            import pickle

        in_f = open(filename,"rb")

        tabversion = pickle.load(in_f)
        if tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")
        self.lr_method = pickle.load(in_f)
        signature = pickle.load(in_f)
        self.lr_action = pickle.load(in_f)
        self.lr_goto = pickle.load(in_f)
        productions = pickle.load(in_f)

        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))

        in_f.close()
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self,pdict):
        """Resolve every production's stored function name against *pdict*."""
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
    """Compute the set-valued function F(x) = FP(x) U U{ F(y) | x R y }.

    Used to compute Read() sets and FOLLOW sets during LALR(1) generation
    (DeRemer & Pennello, 1982).  *X* is the input set, *R* a relation,
    *FP* a set-valued seed function.  Returns a dict mapping each x to F(x).
    """
    N = dict((x, 0) for x in X)   # traversal numbering, 0 = unvisited
    stack = []
    F = {}
    for x in X:
        if not N[x]:
            traverse(x, N, stack, F, X, R, FP)
    return F
def traverse(x,N,stack,F,X,R,FP):
    # Depth-first helper for digraph(): propagates set values along the
    # relation R while collapsing strongly connected components so that
    # every member of a cycle ends up sharing the same result set.
    stack.append(x)
    d = len(stack)          # depth number assigned to x
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)

    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y,N,stack,F,X,R,FP)
        N[x] = min(N[x],N[y])
        # Merge F(y) into F(x).
        for a in F.get(y,[]):
            if a not in F[x]: F[x].append(a)
    if N[x] == d:
        # x is the root of a strongly connected component: mark every member
        # as finished (MAXINT is a module-level sentinel defined earlier in
        # this file) and give them all x's result set.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
class LALRError(YaccError):
    """Raised for internal errors during LALR table construction."""
    pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
    def __init__(self,grammar,method='LALR',log=None):
        """Build LR parsing tables for *grammar* using 'SLR' or 'LALR'.

        Raises LALRError for an unsupported method.  The actual table
        construction is driven by lr_parse_table() at the end.
        """
        if method not in ['SLR','LALR']:
            raise LALRError("Unsupported method %s" % method)

        self.grammar = grammar
        self.lr_method = method

        # Set up the logger (NullLogger discards all output)
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action = {}        # Action table
        self.lr_goto = {}          # Goto table
        self.lr_productions = grammar.Productions  # Copy of grammar Production array
        self.lr_goto_cache = {}    # Cache of computed gotos
        self.lr0_cidhash = {}      # Cache of closures

        self._add_count = 0        # Internal counter used to detect cycles

        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = []        # List of conflicts

        self.sr_conflicts = []
        self.rr_conflicts = []

        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
    def lr0_closure(self,I):
        """Compute the LR(0) closure of the item set *I*.

        Uses a per-call generation counter (_add_count) stamped onto each
        added item (lr0_added) so membership can be tested without a set.
        """
        self._add_count += 1

        # Add everything in I to J
        J = I[:]
        didadd = 1
        while didadd:
            didadd = 0
            for j in J:
                for x in j.lr_after:
                    # Skip items already added during this closure computation.
                    if getattr(x,"lr0_added",0) == self._add_count: continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = 1
        return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
    def lr0_goto(self,I,x):
        """Compute goto(I,x) for an LR(0) item set *I* and grammar symbol *x*.

        Guarantees that identical goto sets are returned as the *same*
        Python object, so later comparisons can use id() instead of
        element-wise equality.  Results are cached in lr_goto_cache.
        """
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I),x),None)
        if g: return g

        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result: a trie keyed by the id() of each advanced item,
        # rooted at self.lr_goto_cache[x].
        s = self.lr_goto_cache.get(x,None)
        if not s:
            s = { }
            self.lr_goto_cache[x] = s

        gs = [ ]
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n),None)
                if not s1:
                    s1 = { }
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        # '$end' marks the leaf of the trie where the closed set is stored.
        g = s.get('$end',None)
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I),x)] = g
        return g
# Compute the LR(0) sets of item function
    def lr0_items(self):
        """Compute the canonical collection C of LR(0) item sets.

        C grows while it is being traversed by index; lr0_cidhash maps
        id(itemset) -> state number for fast later lookup.
        """
        C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
        i = 0
        for I in C:
            self.lr0_cidhash[id(I)] = i
            i += 1

        # Loop over the items in C and each grammar symbols
        i = 0
        while i < len(C):
            I = C[i]
            i += 1

            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = { }
            for ii in I:
                for s in ii.usyms:
                    asyms[s] = None

            for x in asyms:
                g = self.lr0_goto(I,x)
                if not g: continue
                if id(g) in self.lr0_cidhash: continue
                # New state: number it and append it for later processing.
                self.lr0_cidhash[id(g)] = len(C)
                C.append(g)

        return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennelo (1982).
#
# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = {}
num_nullable = 0
while 1:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not t in nullable: break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this functions finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self,C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self,C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = self.lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state],N)
j = self.lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
    def compute_lookback_includes(self,C,trans,nullable):
        """Compute the LOOKBACK and INCLUDES relations for LALR(1) generation.

        LOOKBACK: runs each production forward through the LR(0) machine
        from its start state to its reduce state and records the pairing.

        INCLUDES: (p,A) INCLUDES (p',B) when B -> L A T with T nullable
        and p' leads to p along L.

        Returns (lookdict, includedict).
        """
        lookdict = {}          # Dictionary of lookback relations
        includedict = {}       # Dictionary of include relations

        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1

        # Loop over all transitions and compute lookbacks and includes
        for state,N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N: continue

                # Okay, we have a name match.  We now follow the production all the way
                # through the state machine until we get the . on the right hand side
                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                    lr_index = lr_index + 1
                    t = p.prod[lr_index]

                    # Check to see if this symbol and state are a non-terminal transition
                    if (j,t) in dtrans:
                        # Yes.  Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the
                        # production derives empty
                        li = lr_index + 1
                        while li < p.len:
                            if p.prod[li] in self.grammar.Terminals: break      # No forget it
                            if not p.prod[li] in nullable: break
                            li = li + 1
                        else:
                            # while-else: the loop ran to completion, so the
                            # remainder of the production derives empty.
                            # Appears to be a relation between (j,t) and (state,N)
                            includes.append((j,t))

                    g = self.lr0_goto(C[j],t)               # Go to next set
                    j = self.lr0_cidhash.get(id(g),-1)     # Go to next state

                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                    if r.name != p.name: continue
                    if r.len != p.len: continue
                    i = 0
                    # This look is comparing a production ". A B C" with "A B C ."
                    while i < r.lr_index:
                        if r.prod[i] != p.prod[i+1]: break
                        i = i + 1
                    else:
                        # while-else: every symbol matched, so r is p shifted
                        # to its reduce position in state j.
                        lookb.append((j,r))
            for i in includes:
                if not i in includedict: includedict[i] = []
                includedict[i].append((state,N))
            lookdict[(state,N)] = lookb

        return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self,C, ntrans, nullable):
FP = lambda x: self.dr_relation(C,x,nullable)
R = lambda x: self.reads_relation(C,x,nullable)
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self,ntrans,readsets,inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x,[])
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self,lookbacks,followset):
for trans,lb in lookbacks.items():
# Loop over productions in lookback
for state,p in lb:
if not state in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans,[])
for a in f:
if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
    def add_lalr_lookaheads(self,C):
        """Perform the full LALR lookahead computation over the LR(0) item sets C.

        Runs, in order: nullable-nonterminal detection, nonterminal
        transition discovery, read-set computation, lookback/includes
        relation computation, FOLLOW-set computation, and finally attaches
        the resulting lookaheads to the grammar productions.  The ordering
        matters: each step consumes the previous step's results.
        """
        # Determine all of the nullable nonterminals
        nullable = self.compute_nullable_nonterminals()

        # Find all non-terminal transitions
        trans = self.find_nonterminal_transitions(C)

        # Compute read sets
        readsets = self.compute_read_sets(C,trans,nullable)

        # Compute lookback/includes relations
        lookd, included = self.compute_lookback_includes(C,trans,nullable)

        # Compute LALR FOLLOW sets
        followsets = self.compute_follow_sets(trans,readsets,included)

        # Add all of the lookaheads
        self.add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
    def lr_parse_table(self):
        """Construct the LR parsing tables (action and goto) for SLR or LALR.

        Walks every LR(0) item set, filling in shift/reduce/accept actions
        and resolving shift/reduce and reduce/reduce conflicts using the
        grammar's precedence rules.  Results are stored in self.lr_action
        and self.lr_goto; conflicts are recorded in self.sr_conflicts /
        self.rr_conflicts and reported through self.log.
        """
        Productions = self.grammar.Productions
        Precedence  = self.grammar.Precedence
        goto   = self.lr_goto         # Goto array
        action = self.lr_action      # Action array
        log    = self.log            # Logger for output

        actionp = { }                # Action production array (temporary)

        log.info("Parsing method: %s", self.lr_method)

        # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
        # This determines the number of states

        C = self.lr0_items()

        if self.lr_method == 'LALR':
            self.add_lalr_lookaheads(C)

        # Build the parser table, state by state
        st = 0
        for I in C:
            # Loop over each production in I
            actlist = [ ]              # List of actions
            st_action  = { }
            st_actionp = { }
            st_goto    = { }
            log.info("")
            log.info("state %d", st)
            log.info("")
            for p in I:
                log.info("    (%d) %s", p.number, str(p))
            log.info("")

            for p in I:
                    if p.len == p.lr_index + 1:
                        if p.name == "S'":
                            # Start symbol. Accept!
                            st_action["$end"] = 0
                            st_actionp["$end"] = p
                        else:
                            # We are at the end of a production.  Reduce!
                            if self.lr_method == 'LALR':
                                laheads = p.lookaheads[st]
                            else:
                                laheads = self.grammar.Follow[p.name]
                            for a in laheads:
                                actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                                r = st_action.get(a,None)
                                if r is not None:
                                    # Whoa. Have a shift/reduce or reduce/reduce conflict
                                    if r > 0:
                                        # Need to decide on shift or reduce here
                                        # By default we favor shifting. Need to add
                                        # some precedence rules here.
                                        sprec,slevel = Productions[st_actionp[a].number].prec
                                        rprec,rlevel = Precedence.get(a,('right',0))
                                        if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                            # We really need to reduce here.
                                            st_action[a] = -p.number
                                            st_actionp[a] = p
                                            if not slevel and not rlevel:
                                                log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
                                                self.sr_conflicts.append((st,a,'reduce'))
                                            Productions[p.number].reduced += 1
                                        elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                            st_action[a] = None
                                        else:
                                            # Hmmm. Guess we'll keep the shift
                                            if not rlevel:
                                                log.info("  ! shift/reduce conflict for %s resolved as shift",a)
                                                self.sr_conflicts.append((st,a,'shift'))
                                    elif r < 0:
                                        # Reduce/reduce conflict.   In this case, we favor the rule
                                        # that was defined first in the grammar file
                                        oldp = Productions[-r]
                                        pp = Productions[p.number]
                                        if oldp.line > pp.line:
                                            st_action[a] = -p.number
                                            st_actionp[a] = p
                                            chosenp,rejectp = pp,oldp
                                            Productions[p.number].reduced += 1
                                            Productions[oldp.number].reduced -= 1
                                        else:
                                            chosenp,rejectp = oldp,pp
                                        self.rr_conflicts.append((st,chosenp,rejectp))
                                        log.info("  ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                                    else:
                                        raise LALRError("Unknown conflict in state %d" % st)
                                else:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    Productions[p.number].reduced += 1
                    else:
                        i = p.lr_index
                        a = p.prod[i+1]       # Get symbol right after the "."
                        if a in self.grammar.Terminals:
                            g = self.lr0_goto(I,a)
                            j = self.lr0_cidhash.get(id(g),-1)
                            if j >= 0:
                                # We are in a shift state
                                actlist.append((a,p,"shift and go to state %d" % j))
                                r = st_action.get(a,None)
                                if r is not None:
                                    # Whoa have a shift/reduce or shift/shift conflict
                                    if r > 0:
                                        if r != j:
                                            raise LALRError("Shift/shift conflict in state %d" % st)
                                    elif r < 0:
                                        # Do a precedence check.
                                        #   -  if precedence of reduce rule is higher, we reduce.
                                        #   -  if precedence of reduce is same and left assoc, we reduce.
                                        #   -  otherwise we shift
                                        rprec,rlevel = Productions[st_actionp[a].number].prec
                                        sprec,slevel = Precedence.get(a,('right',0))
                                        if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                            # We decide to shift here... highest precedence to shift
                                            Productions[st_actionp[a].number].reduced -= 1
                                            st_action[a] = j
                                            st_actionp[a] = p
                                            if not rlevel:
                                                log.info("  ! shift/reduce conflict for %s resolved as shift",a)
                                                self.sr_conflicts.append((st,a,'shift'))
                                        elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                            st_action[a] = None
                                        else:
                                            # Hmmm. Guess we'll keep the reduce
                                            if not slevel and not rlevel:
                                                log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
                                                self.sr_conflicts.append((st,a,'reduce'))
                                    else:
                                        raise LALRError("Unknown conflict in state %d" % st)
                                else:
                                    st_action[a] = j
                                    st_actionp[a] = p

            # Print the actions associated with each terminal
            _actprint = { }
            for a,p,m in actlist:
                if a in st_action:
                    if p is st_actionp[a]:
                        log.info("    %-15s %s",a,m)
                        _actprint[(a,m)] = 1
            log.info("")
            # Print the actions that were not used. (debugging)
            not_used = 0
            for a,p,m in actlist:
                if a in st_action:
                    if p is not st_actionp[a]:
                        if not (a,m) in _actprint:
                            log.debug("  ! %-15s [ %s ]",a,m)
                            not_used = 1
                            _actprint[(a,m)] = 1
            if not_used:
                log.debug("")

            # Construct the goto table for this state

            nkeys = { }
            for ii in I:
                for s in ii.usyms:
                    if s in self.grammar.Nonterminals:
                        nkeys[s] = None
            for n in nkeys:
                g = self.lr0_goto(I,n)
                j = self.lr0_cidhash.get(id(g),-1)
                if j >= 0:
                    st_goto[n] = j
                    log.info("    %-30s shift and go to state %d",n,j)

            action[st] = st_action
            actionp[st] = st_actionp
            goto[st] = st_goto
            st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
    def write_table(self,modulename,outputdir='',signature=""):
        """Write the LR parsing tables out as an importable Python module.

        modulename -- (dotted) name of the table module to generate
        outputdir  -- directory in which to place the generated .py file
        signature  -- grammar signature recorded in the file, used later to
                      detect stale tables

        On IOError a message is written to stderr and the error is
        otherwise swallowed (table generation is best-effort).
        """
        basemodulename = modulename.split(".")[-1]
        filename = os.path.join(outputdir,basemodulename) + ".py"
        try:
            f = open(filename,"w")

            f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r

_lr_method = %r

_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))

            # Change smaller to 0 to go back to original tables
            smaller = 1

            # Factor out names to try and make smaller
            if smaller:
                # Invert the (state -> {name: value}) table into
                # (name -> ([states], [values])) so each terminal name is
                # written only once.
                items = { }

                for s,nd in self.lr_action.items():
                    for name,v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([],[])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)

                f.write("\n_lr_action_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)
                    f.write("]),")
                f.write("}\n")

                f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action:  _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
""")

            else:
                f.write("\n_lr_action = { ");
                for k,v in self.lr_action.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n");

            if smaller:
                # Factor out names to try and make smaller
                items = { }

                for s,nd in self.lr_goto.items():
                    for name,v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([],[])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)

                f.write("\n_lr_goto_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)
                    f.write("]),")
                f.write("}\n")

                f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
       if not _x in _lr_goto: _lr_goto[_x] = { }
       _lr_goto[_x][_k] = _y
del _lr_goto_items
""")
            else:
                f.write("\n_lr_goto = { ");
                for k,v in self.lr_goto.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n");

            # Write production table
            f.write("_lr_productions = [\n")
            for p in self.lr_productions:
                if p.func:
                    f.write("  (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
                else:
                    f.write("  (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
            f.write("]\n")

            f.close()

        except IOError:
            e = sys.exc_info()[1]
            sys.stderr.write("Unable to create '%s'\n" % filename)
            sys.stderr.write(str(e)+"\n")
            return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self,filename,signature=""):
try:
import cPickle as pickle
except ImportError:
import pickle
outf = open(filename,"wb")
pickle.dump(__tabversion__,outf,pickle_protocol)
pickle.dump(self.lr_method,outf,pickle_protocol)
pickle.dump(signature,outf,pickle_protocol)
pickle.dump(self.lr_action,outf,pickle_protocol)
pickle.dump(self.lr_goto,outf,pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
else:
outp.append((str(p),p.name,p.len,None,None,None))
pickle.dump(outp,outf,pickle_protocol)
outf.close()
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbol table of a caller *levels* frames up the stack.

    The caller's globals are copied and then overlaid with its locals
    (when the two differ).  Used to recover the environment surrounding
    the yacc() call when no module is explicitly provided.
    """
    # Raise-and-catch is a portable way to obtain the current frame
    try:
        raise RuntimeError
    except RuntimeError:
        frame = sys.exc_info()[2].tb_frame

    # Walk up the requested number of frames
    while levels > 0:
        frame = frame.f_back
        levels -= 1

    symbols = frame.f_globals.copy()
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse a raw grammar rule string into production data.

    doc  -- text containing one grammar rule per line
    file -- filename used in error messages
    line -- line number at which *doc* begins in *file*

    Returns a list of (file, lineno, production_name, symbol_list) tuples.
    Raises SyntaxError for malformed rules.
    """
    grammar = []
    lastp = None          # Most recent production name (for '|' continuations)
    dline = line

    for ps in doc.splitlines():
        dline += 1
        tokens = ps.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # Continuation of the previous rule
                if not lastp:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
                prodname = lastp
                syms = tokens[1:]
            else:
                prodname = tokens[0]
                lastp = prodname
                syms = tokens[2:]
                assign = tokens[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))

            grammar.append((file, dline, prodname, syms))
        except SyntaxError:
            raise
        except Exception:
            # Anything else (e.g. a one-token line) is reported as a syntax error
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))

    return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Information extracted from a module for building a parser.

    Holds the start symbol, error function, token list, precedence table
    and the p_*() action functions found in a symbol dictionary.  The
    get_*() methods collect the raw data; the validate_*() methods
    sanity-check it, setting ``self.error`` and logging diagnostics on
    failure.
    """
    def __init__(self,pdict,log=None):
        # pdict: symbol dictionary to scan (typically a module namespace)
        self.pdict      = pdict
        self.start      = None
        self.error_func = None
        self.tokens     = None
        self.files      = {}
        self.grammar    = []
        self.error      = 0

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        """Collect start symbol, error function, tokens, precedence and rules."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        """Run every validation step; return nonzero if any error was found."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_files()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        """Return an MD5 digest summarizing the grammar specification.

        The digest covers the start symbol, precedence table, token list
        and rule docstrings; it is used to detect stale generated tables.
        """
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(" ".join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    sig.update(f[3].encode('latin-1'))
        except (TypeError,ValueError):
            pass
        return sig.digest()

    # -----------------------------------------------------------------------------
    # validate_files()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file.  Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work).  Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------

    def validate_files(self):
        """Warn about duplicated p_rulename() functions in the rule files."""
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')

        for filename in self.files.keys():
            base,ext = os.path.splitext(filename)
            if ext != '.py': return 1          # No idea. Assume it's okay.

            try:
                f = open(filename)
                lines = f.readlines()
                f.close()
            except IOError:
                continue

            counthash = { }
            for linen,l in enumerate(lines):
                linen += 1
                m = fre.match(l)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)

    # Get the start symbol
    def get_start(self):
        """Fetch the 'start' symbol from the symbol dictionary (may be None)."""
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        """Check that the start symbol, if given, is a string."""
        if self.start is not None:
            if not isinstance(self.start,str):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        """Fetch the p_error() handler from the symbol dictionary (may be None)."""
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        """Check that p_error(), if defined, is a function/method of one argument."""
        if self.error_func:
            if isinstance(self.error_func,types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = 1
                return

            eline = func_code(self.error_func).co_firstlineno
            efile = func_code(self.error_func).co_filename
            self.files[efile] = 1

            # A bound method carries an implicit 'self', hence 1+ismethod
            if (func_code(self.error_func).co_argcount != 1+ismethod):
                self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
                self.error = 1

    # Get the tokens map
    def get_tokens(self):
        """Fetch and minimally type-check the 'tokens' list."""
        tokens = self.pdict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return

        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return

        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        """Reject the reserved token name 'error' and warn on duplicates."""
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = 1
            return

        terminals = {}
        for n in self.tokens:
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the precedence map (if any)
    def get_precedence(self):
        """Fetch the raw 'precedence' specification (may be None)."""
        self.prec = self.pdict.get("precedence",None)

    # Validate and parse the precedence map
    def validate_precedence(self):
        """Check the precedence table and flatten it to (term, assoc, level) tuples."""
        preclist = []
        if self.prec:
            if not isinstance(self.prec,(list,tuple)):
                self.log.error("precedence must be a list or tuple")
                self.error = 1
                return
            for level,p in enumerate(self.prec):
                if not isinstance(p,(list,tuple)):
                    self.log.error("Bad precedence table")
                    self.error = 1
                    return

                if len(p) < 2:
                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
                    self.error = 1
                    return
                assoc = p[0]
                if not isinstance(assoc,str):
                    self.log.error("precedence associativity must be a string")
                    self.error = 1
                    return
                for term in p[1:]:
                    if not isinstance(term,str):
                        self.log.error("precedence items must be strings")
                        self.error = 1
                        return
                    # Levels are numbered from 1 upward; higher binds tighter
                    preclist.append((term,assoc,level+1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        """Collect all p_*() rule functions, sorted by source line number."""
        p_functions = []
        for name, item in self.pdict.items():
            if name[:2] != 'p_': continue
            if name == 'p_error': continue
            if isinstance(item,(types.FunctionType,types.MethodType)):
                line = func_code(item).co_firstlineno
                file = func_code(item).co_filename
                p_functions.append((line,file,name,item.__doc__))

        # Sort all of the actions by line number
        p_functions.sort()
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check each rule function's arity and parse its docstring grammar."""
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return

        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc,file,line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1

                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.files[file] = 1

        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n,v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
            if n[0:2] == 't_': continue

            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
                except Exception:
                    pass

        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
         debuglog=None, errorlog = None, picklefile=None):
    """Build an LR parser from grammar rules found in the calling environment.

    method          -- table construction algorithm, 'LALR' or 'SLR'
    debug           -- if true, produce a debugging file (parser.out)
    module          -- module/object containing the grammar; defaults to
                       the caller's namespace
    tabmodule       -- name of the generated parser table module
    start           -- optional start symbol override
    check_recursion -- if true, check for unreachable symbols and
                       infinite recursion
    optimize        -- if true, reuse existing tables without validating
                       the grammar signature
    write_tables    -- if true, write the parser table module to disk
    debugfile       -- name of the debugging output file
    outputdir       -- directory for generated files
    debuglog        -- logger for debugging output (defaults from *debug*)
    errorlog        -- logger for errors/warnings (defaults to stderr)
    picklefile      -- if given, pickle the tables here instead of
                       writing a table module

    Returns an LRParser instance and stores its bound parse() method in
    the module-global ``parse``.  Raises YaccError when the grammar
    cannot be built.
    """
    global parse                 # Reference to the parsing method of the last built parser

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        pdict = get_caller_module_dict(2)

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict,log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError("Unable to build parser")

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        # Reuse cached tables when optimizing or when the signature matches
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr,pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Any other failure: fall through and regenerate the tables
        pass

    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile,"w"))
        else:
            debuglog = NullLogger()

    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)

    errors = 0

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")

    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term,assoc,level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s",str(e))

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname,syms,funcname,file,line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s",str(e))
            errors = 1

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
        errors = 1

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info("    %s", term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n,p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))

    if debug:
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable",u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)

    lr = LRGeneratedTable(grammar,method,debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s",  tok, state, resolution)

        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            # Each (state, rule, rejected) combination is reported once
            if (state,id(rule),id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state,id(rule),id(rejected)] = 1

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule,outputdir,signature)

    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile,signature)

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr,pinfo.error_func)

    parse = parser.parse
    return parser
# (removed stray dataset-metadata residue -- "| 39.210253", "| 176",
#  "| 0.470948", "|" -- which was not part of the original source and
#  is not valid Python)
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
__version__ = "3.4"
__tabversion__ = "3.2"
yaccdebug = 1
debug_file = 'parser.out' tab_module = 'parsetab' default_lr = 'LALR'
error_count = 3
yaccdevel = 0
resultlimit = 40
pickle_protocol = 0
import re, types, sys, os.path
def func_code(f):
    """Return the code object of function *f*, portably across Python 2/3.

    Python 2 exposes it as ``f.func_code``; Python 3 as ``f.__code__``.
    """
    if sys.version_info[0] < 3:
        return f.func_code
    return f.__code__
# Largest native integer: sys.maxint on Python 2, sys.maxsize on Python 3.
MAXINT = getattr(sys, "maxint", sys.maxsize)
def load_ply_lex():
    """Import and return the PLY lex module.

    The module lives at a different location depending on the Python
    major version; importing lazily avoids a hard dependency until a
    lexer is actually needed.
    """
    if sys.version_info[0] >= 3:
        import ply.lex as lex
    else:
        import lex
    return lex
class PlyLogger(object):
    """Minimal logger that writes %-formatted messages to a file-like object.

    ``info`` and ``critical`` are aliases of ``debug``; ``warning`` and
    ``error`` prefix their messages accordingly.  Keyword arguments are
    accepted for logging-API compatibility but ignored.
    """
    def __init__(self, f):
        self.f = f

    def debug(self, msg, *args, **kwargs):
        self.f.write((msg % args) + "\n")

    def warning(self, msg, *args, **kwargs):
        self.f.write("WARNING: " + (msg % args) + "\n")

    def error(self, msg, *args, **kwargs):
        self.f.write("ERROR: " + (msg % args) + "\n")

    # Aliases: both levels route to plain (unprefixed) output
    info     = debug
    critical = debug
class NullLogger(object):
    """Logger that silently discards everything.

    Every attribute access returns the logger itself, and calling it is a
    no-op that also returns itself, so arbitrarily chained use
    (``log.info(...)``, ``log.debug(...)``) is swallowed without error.
    """
    def __getattribute__(self,name):
        return self

    def __call__(self,*args,**kwargs):
        return self
class YaccError(Exception): pass   # Exception raised for yacc-related build/runtime errors
def format_result(r):
    """Return a short debug string for *r*: type name, id and truncated repr.

    Multi-line reprs are re-escaped into a single line; reprs longer than
    the module-level ``resultlimit`` are truncated with " ...".
    """
    text = repr(r)
    if '\n' in text:
        # Collapse multi-line reprs to one escaped line
        text = repr(text)
    if len(text) > resultlimit:
        text = text[:resultlimit] + " ..."
    return "<%s @ 0x%x> (%s)" % (type(r).__name__, id(r), text)
def format_stack_entry(r):
    """Return a compact representation of a parser stack entry.

    Short reprs (under 16 characters) are shown verbatim; anything longer
    is abbreviated to the object's type name and id.
    """
    text = repr(r)
    if '\n' in text:
        # Collapse multi-line reprs to one escaped line
        text = repr(text)
    if len(text) >= 16:
        return "<%s @ 0x%x>" % (type(r).__name__, id(r))
    return text
class YaccSymbol:
    """Symbol placed on the LR parsing stack.

    The grammar symbol name is stored in the ``type`` attribute; other
    attributes (value, lineno, lexpos, ...) are attached dynamically.
    """
    def __str__(self):
        return self.type

    def __repr__(self):
        return str(self)
class YaccProduction:
    """Wrapper around the right-hand side of a rule during a reduction.

    Provides indexed access to the values of the grammar symbols
    (p[0], p[1], ...) plus line/position queries.  Negative indices read
    from the parser symbol stack instead of the current slice.
    """
    def __init__(self, s, stack=None):
        self.slice  = s          # Symbols of the production being reduced
        self.stack  = stack      # Full parser symbol stack (for negative indexing)
        self.lexer  = None
        self.parser = None

    def __getitem__(self, n):
        # Negative indices reach back into the parser symbol stack
        return self.slice[n].value if n >= 0 else self.stack[n].value

    def __setitem__(self, n, v):
        self.slice[n].value = v

    def __getslice__(self, i, j):
        # Python 2 slicing support
        return [sym.value for sym in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self, n):
        return getattr(self.slice[n], "lineno", 0)

    def set_lineno(self, n, lineno):
        self.slice[n].lineno = lineno

    def linespan(self, n):
        sym = self.slice[n]
        startline = getattr(sym, "lineno", 0)
        endline = getattr(sym, "endlineno", startline)
        return startline, endline

    def lexpos(self, n):
        return getattr(self.slice[n], "lexpos", 0)

    def lexspan(self, n):
        sym = self.slice[n]
        startpos = getattr(sym, "lexpos", 0)
        endpos = getattr(sym, "endlexpos", startpos)
        return startpos, endpos

    def error(self):
        raise SyntaxError
class LRParser:
    def __init__(self,lrtab,errorf):
        # lrtab:  table object holding the generated LR tables
        # errorf: user-supplied p_error() handler (may be None)
        self.productions = lrtab.lr_productions   # Productions indexed by rule number
        self.action = lrtab.lr_action             # LR action table
        self.goto = lrtab.lr_goto                 # LR goto table
        self.errorfunc = errorf                   # Error handling callback
    def errok(self):
        # Mark error recovery as complete.  (The flag is consumed by the
        # parse loop, which is not visible in this chunk -- confirm there.)
        self.errorok = 1
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug,int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
elif tracking:
return self.parseopt(input,lexer,debug,tracking,tokenfunc)
else:
return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
    """LR parsing loop with debug logging enabled.

    Behaviorally identical to parseopt()/parseopt_notrack() except that
    every step is reported through the *debug* logger object.  Edit this
    version first and mirror changes into the optimized copies.

    NOTE(review): several statements in the original had been fused onto
    single lines (lost newlines -- a syntax error); they are split back
    out below.  No logic has been changed.
    """
    lookahead = None                    # Current lookahead symbol
    lookaheadstack = [ ]                # Stack of lookahead symbols
    actions = self.action               # Local reference to action table
    goto = self.goto                    # Local reference to goto table
    prod = self.productions             # Local reference to production list
    pslice = YaccProduction(None)       # Production object passed to grammar rules
    errorcount = 0                      # Used during error recovery
    debug.info("PLY: PARSE DEBUG START")
    # If no lexer was given, default to the module-level PLY lexer.
    if not lexer:
        lex = load_ply_lex()
        lexer = lex.lexer
    pslice.lexer = lexer
    pslice.parser = self
    if input is not None:
        lexer.input(input)
    if tokenfunc is None:
        get_token = lexer.token
    else:
        get_token = tokenfunc
    # Set up the state and symbol stacks (also published on self so that
    # errok()/restart() can reach them during error recovery).
    statestack = [ ]
    self.statestack = statestack
    symstack = [ ]
    self.symstack = symstack
    pslice.stack = symstack
    errtoken = None                     # Err token
    statestack.append(0)
    sym = YaccSymbol()
    sym.type = "$end"
    symstack.append(sym)
    state = 0
    while 1:
        # Get the next symbol: the pending lookahead if set, otherwise
        # the next token off of the lookaheadstack or from the lexer
        debug.debug('')
        debug.debug('State : %s', state)
        if not lookahead:
            if not lookaheadstack:
                lookahead = get_token()     # Get the next token
            else:
                lookahead = lookaheadstack.pop()
            if not lookahead:
                lookahead = YaccSymbol()
                lookahead.type = "$end"
        debug.debug('Stack : %s',
                    ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
        # Check the action table
        ltype = lookahead.type
        t = actions[state].get(ltype)
        if t is not None:
            if t > 0:
                # shift a symbol on the stack
                statestack.append(t)
                state = t
                debug.debug("Action : Shift and goto state %s", t)
                symstack.append(lookahead)
                lookahead = None
                # Decrease error count on successful shift
                if errorcount: errorcount -= 1
                continue
            if t < 0:
                # reduce a symbol on the stack, emit a production
                p = prod[-t]
                pname = p.name
                plen = p.len
                # Get production function
                sym = YaccSymbol()
                sym.type = pname            # Production name
                sym.value = None
                if plen:
                    debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
                else:
                    debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
                if plen:
                    targ = symstack[-plen-1:]
                    targ[0] = sym
                    if tracking:
                        t1 = targ[1]
                        sym.lineno = t1.lineno
                        sym.lexpos = t1.lexpos
                        t1 = targ[-1]
                        sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                        sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                    # The code in this section is duplicated in the
                    # zero-length branch below as a performance
                    # optimization.  Keep both copies in sync.
                    pslice.slice = targ
                    try:
                        # Call the grammar rule with our special slice object
                        del symstack[-plen:]
                        del statestack[-plen:]
                        p.callable(pslice)
                        debug.info("Result : %s", format_result(pslice[0]))
                        symstack.append(sym)
                        state = goto[statestack[-1]][pname]
                        statestack.append(state)
                    except SyntaxError:
                        # If an error was set, enter error recovery state
                        lookaheadstack.append(lookahead)
                        symstack.pop()
                        statestack.pop()
                        state = statestack[-1]
                        sym.type = 'error'
                        lookahead = sym
                        errorcount = error_count
                        self.errorok = 0
                    continue
                else:
                    if tracking:
                        sym.lineno = lexer.lineno
                        sym.lexpos = lexer.lexpos
                    targ = [ sym ]
                    # Duplicated from the branch above -- keep in sync.
                    pslice.slice = targ
                    try:
                        # Call the grammar rule with our special slice object
                        p.callable(pslice)
                        debug.info("Result : %s", format_result(pslice[0]))
                        symstack.append(sym)
                        state = goto[statestack[-1]][pname]
                        statestack.append(state)
                    except SyntaxError:
                        # If an error was set, enter error recovery state
                        lookaheadstack.append(lookahead)
                        symstack.pop()
                        statestack.pop()
                        state = statestack[-1]
                        sym.type = 'error'
                        lookahead = sym
                        errorcount = error_count
                        self.errorok = 0
                    continue
            if t == 0:
                n = symstack[-1]
                result = getattr(n,"value",None)
                debug.info("Done : Returning %s", format_result(result))
                debug.info("PLY: PARSE DEBUG END")
                return result
        if t is None:
            debug.error('Error : %s',
                        ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            # We have some kind of parsing error here.  To handle
            # this, we are going to push the current token onto
            # the tokenstack and replace it with an 'error' token.
            # If there are any synchronization rules, they may
            # catch it.
            #
            # In addition to pushing the error token, we call
            # the user defined p_error() function if this is the
            # first syntax error.  This function is only called if
            # errorcount == 0.
            if errorcount == 0 or self.errorok:
                errorcount = error_count
                self.errorok = 0
                errtoken = lookahead
                if errtoken.type == "$end":
                    errtoken = None         # End of file!
                if self.errorfunc:
                    global errok,token,restart
                    errok = self.errok      # Set some special functions available in error recovery
                    token = get_token
                    restart = self.restart
                    if errtoken and not hasattr(errtoken,'lexer'):
                        errtoken.lexer = lexer
                    tok = self.errorfunc(errtoken)
                    del errok, token, restart   # Delete special functions
                    if self.errorok:
                        # User must have done some kind of panic
                        # mode recovery on their own.  The
                        # returned token is the next lookahead
                        lookahead = tok
                        errtoken = None
                        continue
                else:
                    if errtoken:
                        if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                        else: lineno = 0
                        if lineno:
                            sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                        else:
                            sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                    else:
                        sys.stderr.write("yacc: Parse error in input. EOF\n")
                    return
            else:
                errorcount = error_count
            # case 1: the statestack only has 1 entry on it.  The token is
            # discarded and we just keep going.
            if len(statestack) <= 1 and lookahead.type != "$end":
                lookahead = None
                errtoken = None
                state = 0
                # Nuke the pushback stack
                del lookaheadstack[:]
                continue
            # case 2: the statestack has a couple of entries on it but we
            # have hit end-of-input; give up.
            if lookahead.type == "$end":
                return
            if lookahead.type != 'error':
                sym = symstack[-1]
                if sym.type == 'error':
                    # Hmmm. Error is on top of stack, we'll just nuke input
                    lookahead = None
                    continue
                t = YaccSymbol()
                t.type = 'error'
                if hasattr(lookahead,"lineno"):
                    t.lineno = lookahead.lineno
                t.value = lookahead
                lookaheadstack.append(lookahead)
                lookahead = t
            else:
                symstack.pop()
                statestack.pop()
                state = statestack[-1]
            continue
        # The action table returned something other than shift/reduce/
        # accept/None -- tables are corrupt.
        raise RuntimeError("yacc: internal parser error!!!\n")
def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
    """Optimized LR parsing loop with line/position tracking.

    Mirrors parsedebug() with all logging stripped out; edit that
    version first and copy the changes here.

    NOTE(review): statements fused onto single lines in the original
    (lost newlines -- a syntax error) have been split back out; no
    logic has been changed.
    """
    lookahead = None                    # Current lookahead symbol
    lookaheadstack = [ ]                # Stack of lookahead symbols
    actions = self.action               # Local reference to action table
    goto = self.goto                    # Local reference to goto table
    prod = self.productions             # Local reference to production list
    pslice = YaccProduction(None)       # Production object passed to grammar rules
    errorcount = 0                      # Used during error recovery
    # If no lexer was given, default to the module-level PLY lexer.
    if not lexer:
        lex = load_ply_lex()
        lexer = lex.lexer
    pslice.lexer = lexer
    pslice.parser = self
    if input is not None:
        lexer.input(input)
    if tokenfunc is None:
        get_token = lexer.token
    else:
        get_token = tokenfunc
    # Set up the state and symbol stacks (also published on self so that
    # errok()/restart() can reach them during error recovery).
    statestack = [ ]
    self.statestack = statestack
    symstack = [ ]
    self.symstack = symstack
    pslice.stack = symstack
    errtoken = None                     # Err token
    statestack.append(0)
    sym = YaccSymbol()
    sym.type = '$end'
    symstack.append(sym)
    state = 0
    while 1:
        # Get the next symbol: the pending lookahead if set, otherwise
        # the next token off of the lookaheadstack or from the lexer
        if not lookahead:
            if not lookaheadstack:
                lookahead = get_token()     # Get the next token
            else:
                lookahead = lookaheadstack.pop()
            if not lookahead:
                lookahead = YaccSymbol()
                lookahead.type = '$end'
        # Check the action table
        ltype = lookahead.type
        t = actions[state].get(ltype)
        if t is not None:
            if t > 0:
                # shift a symbol on the stack
                statestack.append(t)
                state = t
                symstack.append(lookahead)
                lookahead = None
                # Decrease error count on successful shift
                if errorcount: errorcount -= 1
                continue
            if t < 0:
                # reduce a symbol on the stack, emit a production
                p = prod[-t]
                pname = p.name
                plen = p.len
                # Get production function
                sym = YaccSymbol()
                sym.type = pname            # Production name
                sym.value = None
                if plen:
                    targ = symstack[-plen-1:]
                    targ[0] = sym
                    if tracking:
                        t1 = targ[1]
                        sym.lineno = t1.lineno
                        sym.lexpos = t1.lexpos
                        t1 = targ[-1]
                        sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                        sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                    # The code in this section is duplicated in the
                    # zero-length branch below as a performance
                    # optimization.  Keep both copies in sync.
                    pslice.slice = targ
                    try:
                        # Call the grammar rule with our special slice object
                        del symstack[-plen:]
                        del statestack[-plen:]
                        p.callable(pslice)
                        symstack.append(sym)
                        state = goto[statestack[-1]][pname]
                        statestack.append(state)
                    except SyntaxError:
                        # If an error was set, enter error recovery state
                        lookaheadstack.append(lookahead)
                        symstack.pop()
                        statestack.pop()
                        state = statestack[-1]
                        sym.type = 'error'
                        lookahead = sym
                        errorcount = error_count
                        self.errorok = 0
                    continue
                else:
                    if tracking:
                        sym.lineno = lexer.lineno
                        sym.lexpos = lexer.lexpos
                    targ = [ sym ]
                    # Duplicated from the branch above -- keep in sync.
                    pslice.slice = targ
                    try:
                        # Call the grammar rule with our special slice object
                        p.callable(pslice)
                        symstack.append(sym)
                        state = goto[statestack[-1]][pname]
                        statestack.append(state)
                    except SyntaxError:
                        # If an error was set, enter error recovery state
                        lookaheadstack.append(lookahead)
                        symstack.pop()
                        statestack.pop()
                        state = statestack[-1]
                        sym.type = 'error'
                        lookahead = sym
                        errorcount = error_count
                        self.errorok = 0
                    continue
            if t == 0:
                n = symstack[-1]
                return getattr(n,"value",None)
        if t is None:
            # We have some kind of parsing error here.  To handle
            # this, we are going to push the current token onto
            # the tokenstack and replace it with an 'error' token.
            # If there are any synchronization rules, they may
            # catch it.
            #
            # In addition to pushing the error token, we call
            # the user defined p_error() function if this is the
            # first syntax error.  This function is only called if
            # errorcount == 0.
            if errorcount == 0 or self.errorok:
                errorcount = error_count
                self.errorok = 0
                errtoken = lookahead
                if errtoken.type == '$end':
                    errtoken = None         # End of file!
                if self.errorfunc:
                    global errok,token,restart
                    errok = self.errok      # Set some special functions available in error recovery
                    token = get_token
                    restart = self.restart
                    if errtoken and not hasattr(errtoken,'lexer'):
                        errtoken.lexer = lexer
                    tok = self.errorfunc(errtoken)
                    del errok, token, restart   # Delete special functions
                    if self.errorok:
                        # User must have done some kind of panic
                        # mode recovery on their own.  The
                        # returned token is the next lookahead
                        lookahead = tok
                        errtoken = None
                        continue
                else:
                    if errtoken:
                        if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                        else: lineno = 0
                        if lineno:
                            sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                        else:
                            sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                    else:
                        sys.stderr.write("yacc: Parse error in input. EOF\n")
                    return
            else:
                errorcount = error_count
            # case 1: the statestack only has 1 entry on it.  The token is
            # discarded and we just keep going.
            if len(statestack) <= 1 and lookahead.type != '$end':
                lookahead = None
                errtoken = None
                state = 0
                # Nuke the pushback stack
                del lookaheadstack[:]
                continue
            # case 2: the statestack has a couple of entries on it but we
            # have hit end-of-input; give up.
            if lookahead.type == '$end':
                return
            if lookahead.type != 'error':
                sym = symstack[-1]
                if sym.type == 'error':
                    # Hmmm. Error is on top of stack, we'll just nuke input
                    lookahead = None
                    continue
                t = YaccSymbol()
                t.type = 'error'
                if hasattr(lookahead,"lineno"):
                    t.lineno = lookahead.lineno
                t.value = lookahead
                lookaheadstack.append(lookahead)
                lookahead = t
            else:
                symstack.pop()
                statestack.pop()
                state = statestack[-1]
            continue
        # The action table returned something other than shift/reduce/
        # accept/None -- tables are corrupt.
        raise RuntimeError("yacc: internal parser error!!!\n")
def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
    """Fastest LR parsing loop: no logging, no position tracking.

    Mirrors parsedebug() with logging and tracking stripped out; edit
    that version first and copy the changes here.

    NOTE(review): statements fused onto single lines in the original
    (lost newlines -- a syntax error) have been split back out; no
    logic has been changed.
    """
    lookahead = None                    # Current lookahead symbol
    lookaheadstack = [ ]                # Stack of lookahead symbols
    actions = self.action               # Local reference to action table
    goto = self.goto                    # Local reference to goto table
    prod = self.productions             # Local reference to production list
    pslice = YaccProduction(None)       # Production object passed to grammar rules
    errorcount = 0                      # Used during error recovery
    # If no lexer was given, default to the module-level PLY lexer.
    if not lexer:
        lex = load_ply_lex()
        lexer = lex.lexer
    pslice.lexer = lexer
    pslice.parser = self
    if input is not None:
        lexer.input(input)
    if tokenfunc is None:
        get_token = lexer.token
    else:
        get_token = tokenfunc
    # Set up the state and symbol stacks (also published on self so that
    # errok()/restart() can reach them during error recovery).
    statestack = [ ]
    self.statestack = statestack
    symstack = [ ]
    self.symstack = symstack
    pslice.stack = symstack
    errtoken = None                     # Err token
    statestack.append(0)
    sym = YaccSymbol()
    sym.type = '$end'
    symstack.append(sym)
    state = 0
    while 1:
        # Get the next symbol: the pending lookahead if set, otherwise
        # the next token off of the lookaheadstack or from the lexer
        if not lookahead:
            if not lookaheadstack:
                lookahead = get_token()     # Get the next token
            else:
                lookahead = lookaheadstack.pop()
            if not lookahead:
                lookahead = YaccSymbol()
                lookahead.type = '$end'
        # Check the action table
        ltype = lookahead.type
        t = actions[state].get(ltype)
        if t is not None:
            if t > 0:
                # shift a symbol on the stack
                statestack.append(t)
                state = t
                symstack.append(lookahead)
                lookahead = None
                # Decrease error count on successful shift
                if errorcount: errorcount -= 1
                continue
            if t < 0:
                # reduce a symbol on the stack, emit a production
                p = prod[-t]
                pname = p.name
                plen = p.len
                # Get production function
                sym = YaccSymbol()
                sym.type = pname            # Production name
                sym.value = None
                if plen:
                    targ = symstack[-plen-1:]
                    targ[0] = sym
                    # The code in this section is duplicated in the
                    # zero-length branch below as a performance
                    # optimization.  Keep both copies in sync.
                    pslice.slice = targ
                    try:
                        # Call the grammar rule with our special slice object
                        del symstack[-plen:]
                        del statestack[-plen:]
                        p.callable(pslice)
                        symstack.append(sym)
                        state = goto[statestack[-1]][pname]
                        statestack.append(state)
                    except SyntaxError:
                        # If an error was set, enter error recovery state
                        lookaheadstack.append(lookahead)
                        symstack.pop()
                        statestack.pop()
                        state = statestack[-1]
                        sym.type = 'error'
                        lookahead = sym
                        errorcount = error_count
                        self.errorok = 0
                    continue
                else:
                    targ = [ sym ]
                    # Duplicated from the branch above -- keep in sync.
                    pslice.slice = targ
                    try:
                        # Call the grammar rule with our special slice object
                        p.callable(pslice)
                        symstack.append(sym)
                        state = goto[statestack[-1]][pname]
                        statestack.append(state)
                    except SyntaxError:
                        # If an error was set, enter error recovery state
                        lookaheadstack.append(lookahead)
                        symstack.pop()
                        statestack.pop()
                        state = statestack[-1]
                        sym.type = 'error'
                        lookahead = sym
                        errorcount = error_count
                        self.errorok = 0
                    continue
            if t == 0:
                n = symstack[-1]
                return getattr(n,"value",None)
        if t is None:
            # We have some kind of parsing error here.  To handle
            # this, we are going to push the current token onto
            # the tokenstack and replace it with an 'error' token.
            # If there are any synchronization rules, they may
            # catch it.
            #
            # In addition to pushing the error token, we call
            # the user defined p_error() function if this is the
            # first syntax error.  This function is only called if
            # errorcount == 0.
            if errorcount == 0 or self.errorok:
                errorcount = error_count
                self.errorok = 0
                errtoken = lookahead
                if errtoken.type == '$end':
                    errtoken = None         # End of file!
                if self.errorfunc:
                    global errok,token,restart
                    errok = self.errok      # Set some special functions available in error recovery
                    token = get_token
                    restart = self.restart
                    if errtoken and not hasattr(errtoken,'lexer'):
                        errtoken.lexer = lexer
                    tok = self.errorfunc(errtoken)
                    del errok, token, restart   # Delete special functions
                    if self.errorok:
                        # User must have done some kind of panic
                        # mode recovery on their own.  The
                        # returned token is the next lookahead
                        lookahead = tok
                        errtoken = None
                        continue
                else:
                    if errtoken:
                        if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                        else: lineno = 0
                        if lineno:
                            sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                        else:
                            sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                    else:
                        sys.stderr.write("yacc: Parse error in input. EOF\n")
                    return
            else:
                errorcount = error_count
            # case 1: the statestack only has 1 entry on it.  The token is
            # discarded and we just keep going.
            if len(statestack) <= 1 and lookahead.type != '$end':
                lookahead = None
                errtoken = None
                state = 0
                # Nuke the pushback stack
                del lookaheadstack[:]
                continue
            # case 2: the statestack has a couple of entries on it but we
            # have hit end-of-input; give up.
            if lookahead.type == '$end':
                return
            if lookahead.type != 'error':
                sym = symstack[-1]
                if sym.type == 'error':
                    # Hmmm. Error is on top of stack, we'll just nuke input
                    lookahead = None
                    continue
                t = YaccSymbol()
                t.type = 'error'
                if hasattr(lookahead,"lineno"):
                    t.lineno = lookahead.lineno
                t.value = lookahead
                lookaheadstack.append(lookahead)
                lookahead = t
            else:
                symstack.pop()
                statestack.pop()
                state = statestack[-1]
            continue
        # The action table returned something other than shift/reduce/
        # accept/None -- tables are corrupt.
        raise RuntimeError("yacc: internal parser error!!!\n")
import re
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
class Production(object):
    """A single grammar rule: name -> prod.

    Instances carry bookkeeping used during table construction:
      name     -- rule (nonterminal) name
      prod     -- tuple of right-hand-side symbols
      number   -- production number in Grammar.Productions
      func     -- name of the reduction function (resolved by bind())
      prec     -- (associativity, level) precedence pair
      usyms    -- unique RHS symbols, in first-appearance order
      lr_items / lr_next -- LR item list/chain built by Grammar.build_lritems()
    """
    # Count of times this production has been reduced (table generation).
    reduced = 0
    def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence
        # Cache length and the set of unique RHS symbols.
        self.len = len(self.prod)
        self.usyms = [ ]
        for s in self.prod:
            if s not in self.usyms:
                self.usyms.append(s)
        # Filled in later by Grammar.build_lritems().
        self.lr_items = []
        self.lr_next = None
        # Cached display string, e.g. "expr -> expr PLUS term".
        if self.prod:
            self.str = "%s -> %s" % (self.name," ".join(self.prod))
        else:
            self.str = "%s -> <empty>" % self.name
    def __str__(self):
        return self.str
    def __repr__(self):
        return "Production("+str(self)+")"
    def __len__(self):
        return len(self.prod)
    def __nonzero__(self):
        # Python 2 truthiness hook; productions always test true.
        return 1
    def __getitem__(self,index):
        return self.prod[index]
    def lr_item(self,n):
        # Return the LR item with the dot at position n, or None past the end.
        # NOTE(review): references a bare module-level 'Prodnames' (not
        # self/Grammar state); looks like legacy code -- confirm before use.
        if n > len(self.prod): return None
        p = LRItem(self,n)
        try:
            p.lr_after = Prodnames[p.prod[n+1]]
        except (IndexError,KeyError):
            p.lr_after = []
        try:
            p.lr_before = p.prod[n-1]
        except IndexError:
            p.lr_before = None
        return p
    def bind(self,pdict):
        # Resolve the reduction-function name to an actual callable.
        if self.func:
            self.callable = pdict[self.func]
class MiniProduction(object):
    """Lightweight stand-in for Production.

    Used when parsing tables are loaded from a cached parsetab module or
    pickle rather than rebuilt from the grammar source.
    """
    def __init__(self, str, name, len, func, file, line):
        # Parameter names 'str' and 'len' shadow builtins but are kept
        # for compatibility with the serialized table tuples.
        self.name = name
        self.len = len
        self.func = func
        self.callable = None        # resolved later by bind()
        self.file = file
        self.line = line
        self.str = str
    def __str__(self):
        return self.str
    def __repr__(self):
        return "MiniProduction(%s)" % self.str
    def bind(self, pdict):
        """Resolve the rule-function name to a callable taken from pdict."""
        if self.func:
            self.callable = pdict[self.func]
class LRItem(object):
    """A production with a dot marking the parse position.

    For example, "E -> E . PLUS E" is production E -> E PLUS E with the
    dot after the first E.  lr_after/lr_before are attached later.
    """
    def __init__(self, p, n):
        self.name = p.name
        # Insert the '.' marker at position n of the right-hand side.
        rhs = list(p.prod)
        rhs.insert(n, ".")
        self.prod = tuple(rhs)
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        self.len = len(self.prod)
        self.usyms = p.usyms
    def __str__(self):
        if self.prod:
            return "%s -> %s" % (self.name, " ".join(self.prod))
        return "%s -> <empty>" % self.name
    def __repr__(self):
        return "LRItem(" + str(self) + ")"
def rightmost_terminal(symbols, terminals):
    """Return the rightmost symbol in *symbols* that is a terminal.

    Used to pick the default precedence of a rule.  Returns None when
    no terminal appears on the right-hand side.
    """
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
class GrammarError(YaccError):
    """Raised for problems in the grammar specification itself."""
    pass
class Grammar(object):
def __init__(self,terminals):
self.Productions = [None]
self.Prodnames = { }
self.Prodmap = { }
self.Terminals = { }
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = { }
self.First = { }
self.Follow = { }
self.Precedence = { }
self.UsedPrecedence = { }
self.Start = None
def __len__(self):
return len(self.Productions)
def __getitem__(self,index):
return self.Productions[index]
def set_precedence(self,term,assoc,level):
assert self.Productions == [None],"Must call set_precedence() before add_production()"
if term in self.Precedence:
raise GrammarError("Precedence already specified for terminal '%s'" % term)
if assoc not in ['left','right','nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc,level)
def add_production(self,prodname,syms,func=None,file='',line=0):
    """Add a grammar rule 'prodname -> syms' to the grammar.

    syms may contain symbol names, quoted single-character literals
    (e.g. '+'), and a trailing '%prec TERM' precedence override (which
    is stripped from syms in place).  func is the *name* of the
    reduction function; file/line locate the rule for error messages.
    Raises GrammarError for malformed or duplicate rules.  Returns 0.
    """
    if prodname in self.Terminals:
        raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
    if prodname == 'error':
        raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
    if not _is_identifier.match(prodname):
        raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
    # Convert quoted character literals on the RHS into terminals.
    for n,s in enumerate(syms):
        if s[0] in "'\"":
            try:
                # eval of a quoted literal taken from the developer's own
                # grammar specification (trusted input, not end-user data).
                c = eval(s)
                if (len(c) > 1):
                    raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                if not c in self.Terminals:
                    self.Terminals[c] = []
                syms[n] = c
                continue
            except SyntaxError:
                # Not a valid literal; fall through to the identifier check.
                pass
        if not _is_identifier.match(s) and s != '%prec':
            raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
    # Determine the precedence level
    if '%prec' in syms:
        if syms[-1] == '%prec':
            raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
        if syms[-2] != '%prec':
            raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
        precname = syms[-1]
        prodprec = self.Precedence.get(precname,None)
        if not prodprec:
            raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
        else:
            # Remember that this precedence name was actually used.
            self.UsedPrecedence[precname] = 1
        del syms[-2:] # Drop %prec from the rule
    else:
        # If no %prec, precedence is determined by the rightmost terminal symbol
        precname = rightmost_terminal(syms,self.Terminals)
        prodprec = self.Precedence.get(precname,('right',0))
    # See if the rule is already in the rulemap
    map = "%s -> %s" % (prodname,syms)
    if map in self.Prodmap:
        m = self.Prodmap[map]
        raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                           "Previous definition at %s:%d" % (m.file, m.line))
    # From this point on, everything is valid. Create a new Production instance
    pnumber = len(self.Productions)
    if not prodname in self.Nonterminals:
        self.Nonterminals[prodname] = [ ]
    # Add the production number to Terminals and Nonterminals
    for t in syms:
        if t in self.Terminals:
            self.Terminals[t].append(pnumber)
        else:
            if not t in self.Nonterminals:
                self.Nonterminals[t] = [ ]
            self.Nonterminals[t].append(pnumber)
    # Create a production and add it to the list of productions
    p = Production(pnumber,prodname,syms,prodprec,func,file,line)
    self.Productions.append(p)
    self.Prodmap[map] = p
    # Add to the global productions list
    try:
        self.Prodnames[prodname].append(p)
    except KeyError:
        self.Prodnames[prodname] = [ p ]
    return 0
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
    """Set the start symbol and install augmented rule 0: S' -> start.

    Defaults to the first rule that was defined; raises GrammarError if
    the requested start symbol has no productions.
    """
    if not start:
        # Default to the first user-defined rule.
        start = self.Productions[1].name
    if start not in self.Nonterminals:
        raise GrammarError("start symbol %s undefined" % start)
    self.Productions[0] = Production(0, "S'", [start])
    self.Nonterminals[start].append(0)
    self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if reachable[s]:
# We've already reached symbol s.
return
reachable[s] = 1
for p in self.Prodnames.get(s,[]):
for r in p.prod:
mark_reachable_from(r)
reachable = { }
for s in list(self.Terminals) + list(self.Nonterminals):
reachable[s] = 0
mark_reachable_from( self.Productions[0].prod[0] )
return [s for s in list(self.Nonterminals)
if not reachable[s]]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = 1
terminates['$end'] = 1
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = 0
# Then propagate termination until no change:
while 1:
some_change = 0
for (n,pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = 0
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = 1
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = 1
some_change = 1
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s,term) in terminates.items():
if not term:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym in the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p: continue
for s in p.prod:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
result.append((s,p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s,v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s,v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname,self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first1, the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self,beta):
# We are computing First(x1,x2,x3,...,xn)
result = [ ]
for x in beta:
x_produces_empty = 0
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = 1
else:
if f not in result: result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while 1:
some_change = 0
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append( f )
some_change = 1
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self,start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = [ ]
if not start:
start = self.Productions[1].name
self.Follow[start] = [ '$end' ]
while 1:
didadd = 0
for p in self.Productions[1:]:
# Here is the production set
for i in range(len(p.prod)):
B = p.prod[i]
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = 0
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if f == '<empty>':
hasempty = 1
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if not didadd: break
return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
    """Build the complete set of LR items for every production.

    For each production p, creates LRItem objects for every dot
    position (e.g. E -> E PLUS E yields E -> . E PLUS E,
    E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E .), stores them in
    p.lr_items, and chains them through lr_next starting at p itself.
    Each item gets lr_after (productions of the symbol right after the
    dot) and lr_before (the symbol right before the dot).
    """
    for p in self.Productions:
        lastlri = p          # chain sentinel: p.lr_next points at the first item
        i = 0
        lr_items = []
        while 1:
            # One past the end produces a None terminator for the chain.
            if i > len(p):
                lri = None
            else:
                lri = LRItem(p,i)
                # Precompute the list of productions immediately following
                try:
                    lri.lr_after = self.Prodnames[lri.prod[i+1]]
                except (IndexError,KeyError):
                    lri.lr_after = []
                try:
                    lri.lr_before = lri.prod[i-1]
                except IndexError:
                    lri.lr_before = None
            lastlri.lr_next = lri
            if not lri: break
            lr_items.append(lri)
            lastlri = lri
            i += 1
        p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
# Exception raised when cached parser tables were generated by an
# incompatible version of PLY.
class VersionError(YaccError): pass
class LRTable(object):
    """A basic table of LR parsing information.

    Knows how to load previously generated tables from a Python module
    (read_table) or from a pickle file (read_pickle).  Methods for
    *generating* tables live in the derived class LRGeneratedTable.
    """
    def __init__(self):
        self.lr_action = None        # Action table
        self.lr_goto = None          # Goto table
        self.lr_productions = None   # List of MiniProduction objects
        self.lr_method = None        # Table construction method ('LALR'/'SLR')

    def read_table(self,module):
        """Load parsing tables from a generated table module.

        module may be an already-imported module object or a module name
        string.  Raises VersionError if the table file was written by an
        incompatible PLY version.  Returns the stored grammar signature.
        """
        if isinstance(module,types.ModuleType):
            parsetab = module
        else:
            if sys.version_info[0] < 3:
                exec("import %s as parsetab" % module)
            else:
                # exec() can't rebind a local in Python 3; import into env
                env = { }
                exec("import %s as parsetab" % module, env, env)
                parsetab = env['parsetab']

        if parsetab._tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto

        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))

        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self,filename):
        """Load parsing tables from a pickle file written by pickle_table().

        Raises VersionError on a table-version mismatch.  Returns the
        stored grammar signature.
        """
        try:
            import cPickle as pickle
        except ImportError:
            import pickle

        in_f = open(filename,"rb")
        try:
            # Fix: guarantee the file is closed even when VersionError (or
            # any unpickling error) is raised part-way through the reads;
            # previously the handle leaked on those paths.
            tabversion = pickle.load(in_f)
            if tabversion != __tabversion__:
                raise VersionError("yacc table file version is out of date")
            self.lr_method = pickle.load(in_f)
            signature = pickle.load(in_f)
            self.lr_action = pickle.load(in_f)
            self.lr_goto = pickle.load(in_f)
            productions = pickle.load(in_f)

            self.lr_productions = []
            for p in productions:
                self.lr_productions.append(MiniProduction(*p))
        finally:
            in_f.close()
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self,pdict):
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X,R,FP):
    """Generic set-valued fixed-point solver (DeRemer & Pennello).

    Computes F(x) = FP(x) U { F(y) | x R y } for every x in X and returns
    the result as a dictionary mapping each x to a list of values.
    """
    marks = dict.fromkeys(X, 0)     # per-node traversal state (0 = unvisited)
    stack = []
    result = { }
    for node in X:
        if marks[node] == 0:
            traverse(node, marks, stack, result, X, R, FP)
    return result
def traverse(x,N,stack,F,X,R,FP):
    """Depth-first helper for digraph().

    Implements the SCC-collapsing walk from the DeRemer/Pennello
    algorithm: N records traversal depth per node, F accumulates the
    set-valued results, and every node in a strongly connected component
    ends up sharing the same F value.
    """
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)

    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y,N,stack,F,X,R,FP)
        N[x] = min(N[x],N[y])
        for a in F.get(y,[]):
            if a not in F[x]: F[x].append(a)
    if N[x] == d:
        # x is the root of a strongly connected component: pop the whole
        # component off the stack and give every member the same result.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
# Exception raised for errors detected during LALR/SLR table construction.
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
    """Generates SLR or LALR parsing tables from a Grammar object.

    There are no public methods except for write_table()/pickle_table();
    everything else is internal table-generation machinery.
    """
    def __init__(self,grammar,method='LALR',log=None):
        if method not in ['SLR','LALR']:
            raise LALRError("Unsupported method %s" % method)

        self.grammar = grammar
        self.lr_method = method

        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action = {}        # Action table
        self.lr_goto = {}          # Goto table
        self.lr_productions = grammar.Productions    # Copy of grammar Production array
        self.lr_goto_cache = {}    # Cache of computed gotos
        self.lr0_cidhash = {}      # Cache of closures

        self._add_count = 0        # Internal counter used to detect cycles

        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = []        # List of conflicts

        self.sr_conflicts = []
        self.rr_conflicts = []

        # Build the tables (order matters: items, FIRST, FOLLOW, then tables)
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()
    # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
    def lr0_closure(self,I):
        """Return the LR(0) closure of the item set I as a new list.

        A per-call generation counter (self._add_count) is stamped onto
        the grammar's LRItem objects to avoid adding the same item twice.
        """
        self._add_count += 1

        # Add everything in I to J
        J = I[:]
        didadd = 1
        while didadd:
            didadd = 0
            # Note: J grows while being iterated; the for loop picks up
            # newly appended items on the same pass.
            for j in J:
                for x in j.lr_after:
                    if getattr(x,"lr0_added",0) == self._add_count: continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = 1
        return J
    # Compute the LR(0) goto function goto(I,X) where I is a set
    # of LR(0) items and X is a grammar symbol.   This function is written
    # in a way that guarantees uniqueness of the generated goto sets
    # (i.e. the same goto set will never be returned as two different Python
    # objects).  With uniqueness, we can later do fast set comparisons using
    # id(obj) instead of element-wise comparison.
    def lr0_goto(self,I,x):
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I),x),None)
        if g: return g

        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result
        s = self.lr_goto_cache.get(x,None)
        if not s:
            s = { }
            self.lr_goto_cache[x] = s

        # Walk a trie keyed on id() of each shifted item; the leaf entry
        # (stored under key '$end') holds the canonical goto set for that
        # exact sequence of items.
        gs = [ ]
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n),None)
                if not s1:
                    s1 = { }
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end',None)
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I),x)] = g
        return g
    # Compute the LR(0) sets of item function
    def lr0_items(self):
        """Build and return the canonical collection C of LR(0) item sets.

        Also fills self.lr0_cidhash, mapping id(itemset) -> state number.
        """
        C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
        i = 0
        for I in C:
            self.lr0_cidhash[id(I)] = i
            i += 1

        # Loop over the items in C and each grammar symbols
        # (C grows while the while loop runs; new states are processed
        # as they are appended)
        i = 0
        while i < len(C):
            I = C[i]
            i += 1

            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = { }
            for ii in I:
                for s in ii.usyms:
                    asyms[s] = None

            for x in asyms:
                g = self.lr0_goto(I,x)
                if not g: continue
                if id(g) in self.lr0_cidhash: continue
                # New state discovered: number it and queue for processing
                self.lr0_cidhash[id(g)] = len(C)
                C.append(g)

        return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
    # The method used here is due to DeRemer and Pennello (1982).
    #
    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
    # Look-Ahead Sets", ACM Transactions on Programming Languages and Systems,
    # Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = {}
num_nullable = 0
while 1:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not t in nullable: break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this functions finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self,C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self,C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = self.lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state],N)
j = self.lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
    def compute_lookback_includes(self,C,trans,nullable):
        """Determine the LOOKBACK and INCLUDES relations.

        Returns (lookdict, includedict): lookdict maps each nonterminal
        transition (state, N) to a list of (final_state, production)
        lookbacks; includedict maps a transition to the list of
        transitions that INCLUDE it.
        """
        lookdict = {}          # Dictionary of lookback relations
        includedict = {}       # Dictionary of include relations

        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1

        # Loop over all transitions and compute lookbacks and includes
        for state,N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N: continue

                # Okay, we have a name match.  We now follow the production all the way
                # through the state machine until we get the . on the right hand side
                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                    lr_index = lr_index + 1
                    t = p.prod[lr_index]

                    # Check to see if this symbol and state are a non-terminal transition
                    if (j,t) in dtrans:
                        # Yes.  Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the
                        # production derives empty
                        li = lr_index + 1
                        while li < p.len:
                            if p.prod[li] in self.grammar.Terminals: break      # No forget it
                            if not p.prod[li] in nullable: break
                            li = li + 1
                        else:
                            # Appears to be a relation between (j,t) and (state,N)
                            includes.append((j,t))

                    g = self.lr0_goto(C[j],t)               # Go to next set
                    j = self.lr0_cidhash.get(id(g),-1)      # Go to next state

                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                    if r.name != p.name: continue
                    if r.len != p.len: continue
                    i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
                    while i < r.lr_index:
                        if r.prod[i] != p.prod[i+1]: break
                        i = i + 1
                    else:
                        lookb.append((j,r))
            for i in includes:
                if not i in includedict: includedict[i] = []
                includedict[i].append((state,N))
            lookdict[(state,N)] = lookb

        return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self,C, ntrans, nullable):
FP = lambda x: self.dr_relation(C,x,nullable)
R = lambda x: self.reads_relation(C,x,nullable)
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self,ntrans,readsets,inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x,[])
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self,lookbacks,followset):
for trans,lb in lookbacks.items():
# Loop over productions in lookback
for state,p in lb:
if not state in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans,[])
for a in f:
if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self,C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C,trans,nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C,trans,nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans,readsets,included)
# Add all of the lookaheads
self.add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
    def lr_parse_table(self):
        """Construct the SLR or LALR parse tables from the grammar.

        Fills self.lr_action and self.lr_goto state by state, resolving
        shift/reduce conflicts by precedence (default: shift) and
        reduce/reduce conflicts in favor of the earlier-defined rule.
        Conflicts are recorded in self.sr_conflicts / self.rr_conflicts.
        """
        Productions = self.grammar.Productions
        Precedence = self.grammar.Precedence
        goto = self.lr_goto           # Goto array
        action = self.lr_action       # Action array
        log = self.log                # Logger for output

        actionp = { }                 # Action production array (temporary)

        log.info("Parsing method: %s", self.lr_method)

        # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
        # This determines the number of states

        C = self.lr0_items()

        if self.lr_method == 'LALR':
            self.add_lalr_lookaheads(C)

        # Build the parser table, state by state
        st = 0
        for I in C:
            # Loop over each production in I
            actlist = [ ]              # List of actions
            st_action = { }
            st_actionp = { }
            st_goto = { }
            log.info("")
            log.info("state %d", st)
            log.info("")
            for p in I:
                log.info("    (%d) %s", p.number, str(p))
            log.info("")

            for p in I:
                if p.len == p.lr_index + 1:
                    if p.name == "S'":
                        # Start symbol. Accept!
                        st_action["$end"] = 0
                        st_actionp["$end"] = p
                    else:
                        # We are at the end of a production.  Reduce!
                        if self.lr_method == 'LALR':
                            laheads = p.lookaheads[st]
                        else:
                            laheads = self.grammar.Follow[p.name]
                        for a in laheads:
                            actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa. Have a shift/reduce or reduce/reduce conflict
                                if r > 0:
                                    # Need to decide on shift or reduce here
                                    # By default we favor shifting. Need to add
                                    # some precedence rules here.
                                    sprec,slevel = Productions[st_actionp[a].number].prec
                                    rprec,rlevel = Precedence.get(a,('right',0))
                                    if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                        # We really need to reduce here.
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        if not slevel and not rlevel:
                                            log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
                                            self.sr_conflicts.append((st,a,'reduce'))
                                        Productions[p.number].reduced += 1
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        # Nonassociative: parse error on this token
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the shift
                                        if not rlevel:
                                            log.info("  ! shift/reduce conflict for %s resolved as shift",a)
                                            self.sr_conflicts.append((st,a,'shift'))
                                elif r < 0:
                                    # Reduce/reduce conflict.   In this case, we favor the rule
                                    # that was defined first in the grammar file
                                    oldp = Productions[-r]
                                    pp = Productions[p.number]
                                    if oldp.line > pp.line:
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        chosenp,rejectp = pp,oldp
                                        Productions[p.number].reduced += 1
                                        Productions[oldp.number].reduced -= 1
                                    else:
                                        chosenp,rejectp = oldp,pp
                                    self.rr_conflicts.append((st,chosenp,rejectp))
                                    log.info("  ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                                else:
                                    raise LALRError("Unknown conflict in state %d" % st)
                            else:
                                st_action[a] = -p.number
                                st_actionp[a] = p
                                Productions[p.number].reduced += 1
                else:
                    i = p.lr_index
                    a = p.prod[i+1]       # Get symbol right after the "."
                    if a in self.grammar.Terminals:
                        g = self.lr0_goto(I,a)
                        j = self.lr0_cidhash.get(id(g),-1)
                        if j >= 0:
                            # We are in a shift state
                            actlist.append((a,p,"shift and go to state %d" % j))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa have a shift/reduce or shift/shift conflict
                                if r > 0:
                                    if r != j:
                                        raise LALRError("Shift/shift conflict in state %d" % st)
                                elif r < 0:
                                    # Do a precedence check.
                                    #   -  if precedence of reduce rule is higher, we reduce.
                                    #   -  if precedence of reduce is same and left assoc, we reduce.
                                    #   -  otherwise we shift
                                    rprec,rlevel = Productions[st_actionp[a].number].prec
                                    sprec,slevel = Precedence.get(a,('right',0))
                                    if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                        # We decide to shift here... highest precedence to shift
                                        Productions[st_actionp[a].number].reduced -= 1
                                        st_action[a] = j
                                        st_actionp[a] = p
                                        if not rlevel:
                                            log.info("  ! shift/reduce conflict for %s resolved as shift",a)
                                            self.sr_conflicts.append((st,a,'shift'))
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the reduce
                                        if not slevel and not rlevel:
                                            log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
                                            self.sr_conflicts.append((st,a,'reduce'))
                                else:
                                    raise LALRError("Unknown conflict in state %d" % st)
                            else:
                                st_action[a] = j
                                st_actionp[a] = p

            # Print the actions associated with each terminal
            _actprint = { }
            for a,p,m in actlist:
                if a in st_action:
                    if p is st_actionp[a]:
                        log.info("    %-15s %s",a,m)
                        _actprint[(a,m)] = 1
            log.info("")
            # Print the actions that were not used. (debugging)
            not_used = 0
            for a,p,m in actlist:
                if a in st_action:
                    if p is not st_actionp[a]:
                        if not (a,m) in _actprint:
                            log.debug("  ! %-15s [ %s ]",a,m)
                            not_used = 1
                            _actprint[(a,m)] = 1
            if not_used:
                log.debug("")

            # Construct the goto table for this state
            nkeys = { }
            for ii in I:
                for s in ii.usyms:
                    if s in self.grammar.Nonterminals:
                        nkeys[s] = None
            for n in nkeys:
                g = self.lr0_goto(I,n)
                j = self.lr0_cidhash.get(id(g),-1)
                if j >= 0:
                    st_goto[n] = j
                    log.info("    %-30s shift and go to state %d",n,j)

            action[st] = st_action
            actionp[st] = st_actionp
            goto[st] = st_goto
            st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self,modulename,outputdir='',signature=""):
basemodulename = modulename.split(".")[-1]
filename = os.path.join(outputdir,basemodulename) + ".py"
try:
f = open(filename,"w")
f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = { }
for s,nd in self.lr_action.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_action_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
""")
else:
f.write("\n_lr_action = { ");
for k,v in self.lr_action.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
if smaller:
# Factor out names to try and make smaller
items = { }
for s,nd in self.lr_goto.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_goto_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
""")
else:
f.write("\n_lr_goto = { ");
for k,v in self.lr_goto.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
# Write production table
f.write("_lr_productions = [\n")
for p in self.lr_productions:
if p.func:
f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
else:
f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
f.write("]\n")
f.close()
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("Unable to create '%s'\n" % filename)
sys.stderr.write(str(e)+"\n")
return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self,filename,signature=""):
try:
import cPickle as pickle
except ImportError:
import pickle
outf = open(filename,"wb")
pickle.dump(__tabversion__,outf,pickle_protocol)
pickle.dump(self.lr_method,outf,pickle_protocol)
pickle.dump(signature,outf,pickle_protocol)
pickle.dump(self.lr_action,outf,pickle_protocol)
pickle.dump(self.lr_goto,outf,pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
else:
outp.append((str(p),p.name,p.len,None,None,None))
pickle.dump(outp,outf,pickle_protocol)
outf.close()
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbols visible in a caller's frame, `levels` frames up
    the call stack: the frame's globals overlaid with its locals.  Used
    to recover the environment of the yacc() caller when no module was
    supplied explicitly."""
    # Raise and catch an exception purely to obtain the current frame
    # from the traceback (portable alternative to sys._getframe()).
    try:
        raise RuntimeError
    except RuntimeError:
        exc, val, tb = sys.exc_info()
        frame = tb.tb_frame
        for _step in range(levels):
            frame = frame.f_back
        symbols = frame.f_globals.copy()
        if frame.f_globals != frame.f_locals:
            symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc,file,line):
    """Parse a raw grammar rule docstring into a list of
    (file, lineno, prodname, syms) tuples.

    `line` is the line number of the docstring's opening quote; rule
    lines are numbered relative to it.  '|' continues the most recent
    production.  Raises SyntaxError on malformed rules.
    """
    rules = []
    current = None          # name of the most recent production (for '|')
    lineno = line
    for raw in doc.splitlines():
        lineno += 1
        tokens = raw.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # This is a continuation of a previous rule
                if current is None:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file,lineno))
                name = current
                syms = tokens[1:]
            else:
                name = tokens[0]
                current = name
                syms = tokens[2:]
                # tokens[1] may raise IndexError for a bare name; that is
                # deliberately caught below as a generic syntax error
                sep = tokens[1]
                if sep != ':' and sep != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,lineno))
            rules.append((file,lineno,name,syms))
        except SyntaxError:
            raise
        except Exception:
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,lineno,raw.strip()))
    return rules
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Collects and validates parser-specification information (start
    symbol, error handler, token list, precedence table, and p_* rule
    functions) extracted from a module dictionary."""
    def __init__(self,pdict,log=None):
        self.pdict = pdict        # module dictionary being introspected
        self.start = None         # start symbol ('start' entry)
        self.error_func = None    # p_error handler
        self.tokens = None        # declared token list
        self.files = {}           # files containing rule definitions
        self.grammar = []         # extracted grammar rules
        self.error = 0            # set to 1 when any validation fails
        self.log = PlyLogger(sys.stderr) if log is None else log
    # Get all of the basic information
    def get_all(self):
        """Collect start symbol, error function, tokens, precedence, and
        rule functions from the module dictionary."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        """Run every validation pass and return self.error (nonzero if
        any check failed)."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_files()
        return self.error
    # Compute a signature over the grammar
    def signature(self):
        """Return an md5 digest summarizing the grammar specification
        (start symbol, precedence, token list, and rule docstrings).
        Used to detect when cached parser tables are stale."""
        try:
            from hashlib import md5
        except ImportError:
            # Pre-2.5 fallback
            from md5 import md5
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(" ".join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    # f[3] is the rule function's docstring
                    sig.update(f[3].encode('latin-1'))
        except (TypeError,ValueError):
            # Malformed entries are ignored; validation reports them
            pass
        return sig.digest()
# -----------------------------------------------------------------------------
# validate_file()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_files(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for filename in self.files.keys():
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea. Assume it's okay.
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
continue
counthash = { }
for linen,l in enumerate(lines):
linen += 1
m = fre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
    # Get the start symbol
    def get_start(self):
        """Fetch the 'start' symbol (if any) from the module dictionary."""
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        """Check that the start symbol, if given, is a string."""
        if self.start is not None:
            if not isinstance(self.start,str):
                # NOTE(review): unlike the other validators this does not
                # set self.error — presumably the bad value is rejected
                # later during grammar construction; confirm.
                self.log.error("'start' must be a string")
    # Look for error handler
    def get_error_func(self):
        """Fetch the p_error() handler (if any) from the module dictionary."""
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        """Check that p_error, if defined, is a function or method taking
        exactly one argument (plus self for bound methods).  Sets
        self.error on failure and records the defining file."""
        if self.error_func:
            if isinstance(self.error_func,types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = 1
                return

            eline = func_code(self.error_func).co_firstlineno
            efile = func_code(self.error_func).co_filename
            self.files[efile] = 1

            if (func_code(self.error_func).co_argcount != 1+ismethod):
                self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
                self.error = 1
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = 1
return
terminals = {}
for n in self.tokens:
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
    # Get the precedence map (if any)
    def get_precedence(self):
        """Fetch the raw precedence specification from the module dictionary."""
        self.prec = self.pdict.get("precedence",None)
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec,(list,tuple)):
self.log.error("precedence must be a list or tuple")
self.error = 1
return
for level,p in enumerate(self.prec):
if not isinstance(p,(list,tuple)):
self.log.error("Bad precedence table")
self.error = 1
return
if len(p) < 2:
self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
self.error = 1
return
assoc = p[0]
if not isinstance(assoc,str):
self.log.error("precedence associativity must be a string")
self.error = 1
return
for term in p[1:]:
if not isinstance(term,str):
self.log.error("precedence items must be strings")
self.error = 1
return
preclist.append((term,assoc,level+1))
self.preclist = preclist
    # Get all p_functions from the grammar
    def get_pfunctions(self):
        """Collect every p_* rule function/method from the module
        dictionary as (line, file, name, docstring) tuples, sorted by
        source line number, into self.pfuncs."""
        p_functions = []
        for name, item in self.pdict.items():
            if name[:2] != 'p_': continue
            if name == 'p_error': continue
            if isinstance(item,(types.FunctionType,types.MethodType)):
                line = func_code(item).co_firstlineno
                file = func_code(item).co_filename
                p_functions.append((line,file,name,item.__doc__))

        # Sort all of the actions by line number
        p_functions.sort()
        self.pfuncs = p_functions
    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check every p_* rule function's argument count and docstring,
        parse the docstrings into self.grammar entries, and warn about
        suspicious names: p_ entries that are not functions, and
        functions that look like rules but lack the p_ prefix."""
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return

        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            # Methods take (self, p); plain functions take (p)
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc,file,line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1

                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.files[file] = 1

        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n,v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
            if n[0:2] == 't_': continue
            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    # Heuristic: a one-argument function whose docstring's
                    # second word is ':' probably is a mis-named rule
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
                except Exception:
                    pass

        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
         debuglog=None, errorlog = None, picklefile=None):
    """Build an LR parser from grammar rules found in *module* (or, by
    default, the caller's namespace) and return an LRParser instance.

    Tries to reuse previously generated tables (from tabmodule or
    picklefile) when their signature matches the current grammar; otherwise
    validates the grammar, generates fresh tables with *method*
    ('LALR'/'SLR'), and optionally writes them back out.

    Raises YaccError if the grammar cannot be turned into a parser.
    """
    global parse # Reference to the parsing method of the last built parser

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        pdict = get_caller_module_dict(2)

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict,log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError("Unable to build parser")

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        # Reuse cached tables when optimizing or when the grammar is unchanged.
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr,pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Any failure to load cached tables simply forces regeneration below.
        pass

    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile,"w"))
        else:
            debuglog = NullLogger()

    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)

    errors = 0

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")

    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term,assoc,level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s",str(e))

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname,syms,funcname,file,line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s",str(e))
            errors = 1

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
        errors = 1

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info("    %s", term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n,p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))

    if debug:
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable",u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)

    lr = LRGeneratedTable(grammar,method,debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s",  tok, state, resolution)

        # Deduplicate reduce/reduce reports on (state, rule, rejected) identity.
        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            if (state,id(rule),id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state,id(rule),id(rejected)] = 1

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule,outputdir,signature)

    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile,signature)

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr,pinfo.error_func)

    parse = parser.parse
    return parser
| true
| true
|
f7043a393c4a30b5d5056296579c11f81244b7d0
| 150
|
py
|
Python
|
common/enums/game_scenes.py
|
nikolastojsin/donkey-kong-drs-projekat
|
f7f837a7195aa731badb25d280c06317e9ada7d1
|
[
"MIT"
] | null | null | null |
common/enums/game_scenes.py
|
nikolastojsin/donkey-kong-drs-projekat
|
f7f837a7195aa731badb25d280c06317e9ada7d1
|
[
"MIT"
] | null | null | null |
common/enums/game_scenes.py
|
nikolastojsin/donkey-kong-drs-projekat
|
f7f837a7195aa731badb25d280c06317e9ada7d1
|
[
"MIT"
] | null | null | null |
from enum import Enum
class GameScenes(Enum):
    """Identifiers for the game's five level scenes."""
    FIRST_LEVEL = 1
    SECOND_LEVEL = 2
    THIRD_LEVEL = 3
    FOURTH_LEVEL = 4
    FIFTH_LEVEL = 5
| 15
| 23
| 0.66
|
from enum import Enum
class GameScenes(Enum):
FIRST_LEVEL = 1
SECOND_LEVEL = 2
THIRD_LEVEL = 3
FOURTH_LEVEL = 4
FIFTH_LEVEL = 5
| true
| true
|
f7043a4617de8a54b3d6ff2a89444c0826f42479
| 94
|
py
|
Python
|
web/apps/login/app.py
|
JW709/zoom
|
3b26a22e569bf44a9856b587771589413b52e81b
|
[
"MIT"
] | 1
|
2017-05-11T17:24:49.000Z
|
2017-05-11T17:24:49.000Z
|
web/apps/login/app.py
|
sean-hayes/zoom
|
eda69c64ceb69dd87d2f7a5dfdaeea52ef65c581
|
[
"MIT"
] | null | null | null |
web/apps/login/app.py
|
sean-hayes/zoom
|
eda69c64ceb69dd87d2f7a5dfdaeea52ef65c581
|
[
"MIT"
] | 1
|
2020-07-20T00:33:27.000Z
|
2020-07-20T00:33:27.000Z
|
"""
login app
"""
from zoom.apps import App
class MyApp(App):
    # Login app with no custom behavior; the zoom framework's App base
    # class supplies everything this app needs.
    pass

# Module-level instance the zoom framework imports and serves.
app = MyApp()
| 7.230769
| 25
| 0.574468
|
from zoom.apps import App
class MyApp(App):
pass
app = MyApp()
| true
| true
|
f7043a58b466f3f8e7c9d564fe527c55e1f9d6fe
| 825
|
py
|
Python
|
cohere-scripts/beamlines/aps_34idc/diffractometers.py
|
jacione/cohere-scripts
|
6bb111035660a57e18da5d86ad9dbf0f1d50c657
|
[
"BSD-3-Clause"
] | null | null | null |
cohere-scripts/beamlines/aps_34idc/diffractometers.py
|
jacione/cohere-scripts
|
6bb111035660a57e18da5d86ad9dbf0f1d50c657
|
[
"BSD-3-Clause"
] | null | null | null |
cohere-scripts/beamlines/aps_34idc/diffractometers.py
|
jacione/cohere-scripts
|
6bb111035660a57e18da5d86ad9dbf0f1d50c657
|
[
"BSD-3-Clause"
] | null | null | null |
from cohere import Diffractometer
class Diffractometer_34idc(Diffractometer):
    """
    Subclass of Diffractometer. Encapsulates "34idc" diffractometer.
    """
    name = "34idc"
    sampleaxes = ('y+', 'z-', 'y+')  # in xrayutilities notation
    detectoraxes = ('y+', 'x-')
    incidentaxis = (0, 0, 1)
    sampleaxes_name = ('th', 'chi', 'phi')  # using the spec mnemonics for scan id.
    detectoraxes_name = ('delta', 'gamma')

    def __init__(self):
        # Registers this instance under the beamline name '34idc' with the
        # Diffractometer base class (base signature not visible here --
        # NOTE(review): confirm against the cohere package).
        super(Diffractometer_34idc, self).__init__('34idc')
def create_diffractometer(diff_name):
    """Return a diffractometer object for *diff_name*, or None if unknown.

    Only the '34idc' diffractometer is currently defined; any other name
    produces a diagnostic message and no object.
    """
    if diff_name == '34idc':
        return Diffractometer_34idc()
    print ('diffractometer ' + diff_name + ' not defined.')
    return None
def verify_diffractometer(diff_name):
    """Return True if *diff_name* names a supported diffractometer.

    Only '34idc' is currently supported.
    """
    # Direct comparison replaces the redundant if/else returning booleans.
    return diff_name == '34idc'
| 25
| 83
| 0.637576
|
from cohere import Diffractometer
class Diffractometer_34idc(Diffractometer):
name = "34idc"
sampleaxes = ('y+', 'z-', 'y+') detectoraxes = ('y+', 'x-')
incidentaxis = (0, 0, 1)
sampleaxes_name = ('th', 'chi', 'phi') detectoraxes_name = ('delta', 'gamma')
def __init__(self):
super(Diffractometer_34idc, self).__init__('34idc')
def create_diffractometer(diff_name):
if diff_name == '34idc':
return Diffractometer_34idc()
else:
print ('diffractometer ' + diff_name + ' not defined.')
def verify_diffractometer(diff_name):
if diff_name == '34idc':
return True
else:
return False
| true
| true
|
f7043af4f416dadb6f4555672f0616879e32a468
| 1,479
|
py
|
Python
|
aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/CheckDomainSunriseClaimRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/CheckDomainSunriseClaimRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/CheckDomainSunriseClaimRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CheckDomainSunriseClaimRequest(RpcRequest):
    """RPC request wrapper for the Domain API 'CheckDomainSunriseClaim'
    action (API version 2018-01-29).

    Generated accessor pairs below read/write the corresponding query
    parameters on the underlying RpcRequest.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Domain', '2018-01-29', 'CheckDomainSunriseClaim')

    # DomainName: the domain name the claim check is performed for.
    def get_DomainName(self):
        return self.get_query_params().get('DomainName')

    def set_DomainName(self,DomainName):
        self.add_query_param('DomainName',DomainName)

    # UserClientIp: presumably the end user's client IP -- verify against
    # the Aliyun Domain API reference.
    def get_UserClientIp(self):
        return self.get_query_params().get('UserClientIp')

    def set_UserClientIp(self,UserClientIp):
        self.add_query_param('UserClientIp',UserClientIp)

    # Lang: response language selector.
    def get_Lang(self):
        return self.get_query_params().get('Lang')

    def set_Lang(self,Lang):
        self.add_query_param('Lang',Lang)
| 35.214286
| 79
| 0.765382
|
from aliyunsdkcore.request import RpcRequest
class CheckDomainSunriseClaimRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'CheckDomainSunriseClaim')
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
| true
| true
|
f7043b12ee54ca5fb24b861efb48efdd876d80e1
| 1,484
|
py
|
Python
|
Boilermake2018/Lib/site-packages/chatterbot/preprocessors.py
|
TejPatel98/voice_your_professional_email
|
9cc48f7bcd6576a6962711755e5d5d485832128c
|
[
"CC0-1.0"
] | 9
|
2021-08-08T22:42:55.000Z
|
2021-11-23T06:50:30.000Z
|
Boilermake2018/Lib/site-packages/chatterbot/preprocessors.py
|
TejPatel98/voice_your_professional_email
|
9cc48f7bcd6576a6962711755e5d5d485832128c
|
[
"CC0-1.0"
] | 2
|
2017-12-06T07:40:08.000Z
|
2017-12-06T07:42:43.000Z
|
Boilermake2018/Lib/site-packages/chatterbot/preprocessors.py
|
TejPatel98/voice_your_professional_email
|
9cc48f7bcd6576a6962711755e5d5d485832128c
|
[
"CC0-1.0"
] | 7
|
2018-01-04T10:02:11.000Z
|
2019-06-18T14:24:04.000Z
|
# -*- coding: utf-8 -*-
"""
Statement pre-processors.
"""
def clean_whitespace(chatbot, statement):
    """
    Remove any consecutive whitespace characters from the statement text.
    """
    import re

    # Turn line breaks and tabs into single spaces first.
    text = statement.text
    for control_char in ('\n', '\r', '\t'):
        text = text.replace(control_char, ' ')

    # Drop leading/trailing whitespace, then squeeze runs of spaces.
    text = text.strip()
    statement.text = re.sub(' +', ' ', text)

    return statement
def unescape_html(chatbot, statement):
    """
    Convert escaped html characters into unescaped html characters.

    For example: "&lt;b&gt;" becomes "<b>".
    """
    import sys

    # Python 2 keeps the unescape helper on HTMLParser; Python 3 exposes
    # it directly on the html module.
    if sys.version_info[0] >= 3:
        import html
        unescape = html.unescape
    else:
        from HTMLParser import HTMLParser
        unescape = HTMLParser().unescape

    statement.text = unescape(statement.text)
    return statement
def convert_to_ascii(chatbot, statement):
    """
    Converts unicode characters to ASCII character equivalents.

    For example: "på fédéral" becomes "pa federal".
    """
    import sys
    import unicodedata

    # On Python 2 the text must be a unicode object before normalizing.
    if sys.version_info[0] < 3:
        statement.text = unicode(statement.text)  # NOQA

    # Decompose accented characters, then drop anything outside ASCII.
    decomposed = unicodedata.normalize('NFKD', statement.text)
    ascii_only = decomposed.encode('ascii', 'ignore').decode('utf-8')
    statement.text = str(ascii_only)
    return statement
| 24.327869
| 92
| 0.654987
|
def clean_whitespace(chatbot, statement):
import re
statement.text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
statement.text = statement.text.strip()
statement.text = re.sub(' +', ' ', statement.text)
return statement
def unescape_html(chatbot, statement):
import sys
if sys.version_info[0] < 3:
from HTMLParser import HTMLParser
html = HTMLParser()
else:
import html
statement.text = html.unescape(statement.text)
return statement
def convert_to_ascii(chatbot, statement):
import unicodedata
import sys
if sys.version_info[0] < 3:
statement.text = unicode(statement.text)
text = unicodedata.normalize('NFKD', statement.text)
text = text.encode('ascii', 'ignore').decode('utf-8')
statement.text = str(text)
return statement
| true
| true
|
f7043b3ecfc447c28853664acd21eddc6920523e
| 5,304
|
py
|
Python
|
NREL/custom/packages/nalu-wind/package.py
|
jfinney10/spack-configs
|
c230ade92794901eb3563dfc9a0e1ec370b6a27a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 36
|
2018-07-31T20:35:13.000Z
|
2022-03-27T16:48:17.000Z
|
NREL/custom/packages/nalu-wind/package.py
|
jfinney10/spack-configs
|
c230ade92794901eb3563dfc9a0e1ec370b6a27a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2018-08-08T16:25:34.000Z
|
2022-03-11T20:54:27.000Z
|
NREL/custom/packages/nalu-wind/package.py
|
jfinney10/spack-configs
|
c230ade92794901eb3563dfc9a0e1ec370b6a27a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2018-07-31T20:47:10.000Z
|
2021-12-17T21:21:59.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class NaluWind(CMakePackage):
    """Nalu-Wind: Wind energy focused variant of Nalu."""

    homepage = "https://github.com/exawind/nalu-wind"
    git      = "https://github.com/exawind/nalu-wind.git"

    maintainers = ['jrood-nrel']

    tags = ['ecp', 'ecp-apps']

    version('master', branch='master')

    # Options
    # Shared builds default off on macOS (see the Trilinos/STK note below).
    variant('shared', default=(sys.platform != 'darwin'),
            description='Build dependencies as shared libraries')
    variant('pic', default=True,
            description='Position independent code')
    # Third party libraries
    variant('cuda', default=False,
            description='Compile with CUDA support')
    variant('openfast', default=False,
            description='Compile with OpenFAST support')
    variant('tioga', default=False,
            description='Compile with Tioga support')
    variant('hypre', default=False,
            description='Compile with Hypre support')
    variant('catalyst', default=False,
            description='Compile with Catalyst support')
    variant('fftw', default=False,
            description='Compile with FFTW support')

    # Required dependencies
    depends_on('mpi')
    depends_on('yaml-cpp@0.5.3:', when='+shared')
    depends_on('yaml-cpp~shared@0.5.3:', when='~shared')
    # Cannot build Trilinos as a shared library with STK on Darwin
    # which is why we have a 'shared' variant for Nalu-Wind
    # https://github.com/trilinos/Trilinos/issues/2994
    depends_on('trilinos+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='+shared')
    depends_on('trilinos~shared+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='~shared')
    depends_on('trilinos~shared+cuda+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='+cuda')
    # Optional dependencies
    depends_on('openfast+cxx', when='+openfast+shared')
    depends_on('openfast+cxx~shared', when='+openfast~shared')
    depends_on('tioga', when='+tioga+shared')
    depends_on('tioga~shared', when='+tioga~shared')
    depends_on('hypre+mpi+int64', when='+hypre+shared')
    depends_on('hypre+mpi+int64~shared', when='+hypre~shared')
    depends_on('trilinos-catalyst-ioss-adapter', when='+catalyst')
    # FFTW doesn't have a 'shared' variant at this moment
    depends_on('fftw+mpi', when='+fftw')
    depends_on('cuda', when='+cuda')

    def setup_environment(self, spack_env, run_env):
        # Trilinos' nvcc_wrapper reads this variable to pick the host compiler.
        if '+cuda' in self.spec:
            spack_env.set('NVCC_WRAPPER_DEFAULT_COMPILER', spack_cxx)

    def cmake_args(self):
        """Assemble the CMake configure arguments from the active variants."""
        spec = self.spec
        options = []

        options.extend([
            '-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
            '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
        ])

        # CUDA builds compile C++ through Trilinos' nvcc_wrapper instead of
        # the plain MPI C++ wrapper.
        if '+cuda' in self.spec:
            options.extend([
                '-DCMAKE_CXX_COMPILER=%s' % join_path(self.spec['trilinos'].prefix, 'bin', 'nvcc_wrapper'),
            ])
        else:
            options.extend([
                '-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
            ])

        options.extend([
            '-DTrilinos_DIR:PATH=%s' % spec['trilinos'].prefix,
            '-DYAML_DIR:PATH=%s' % spec['yaml-cpp'].prefix,
            '-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=%s' % (
                'ON' if '+pic' in spec else 'OFF'),
        ])

        # Each optional TPL is toggled on with its install prefix, or
        # explicitly toggled off.
        if '+openfast' in spec:
            options.extend([
                '-DENABLE_OPENFAST:BOOL=ON',
                '-DOpenFAST_DIR:PATH=%s' % spec['openfast'].prefix
            ])
        else:
            options.append('-DENABLE_OPENFAST:BOOL=OFF')

        if '+tioga' in spec:
            options.extend([
                '-DENABLE_TIOGA:BOOL=ON',
                '-DTIOGA_DIR:PATH=%s' % spec['tioga'].prefix
            ])
        else:
            options.append('-DENABLE_TIOGA:BOOL=OFF')

        if '+hypre' in spec:
            options.extend([
                '-DENABLE_HYPRE:BOOL=ON',
                '-DHYPRE_DIR:PATH=%s' % spec['hypre'].prefix
            ])
        else:
            options.append('-DENABLE_HYPRE:BOOL=OFF')

        if '+catalyst' in spec:
            options.extend([
                '-DENABLE_PARAVIEW_CATALYST:BOOL=ON',
                '-DPARAVIEW_CATALYST_INSTALL_PATH:PATH=%s' %
                spec['trilinos-catalyst-ioss-adapter'].prefix
            ])
        else:
            options.append('-DENABLE_PARAVIEW_CATALYST:BOOL=OFF')

        if '+fftw' in spec:
            options.extend([
                '-DENABLE_FFTW:BOOL=ON',
                '-DFFTW_DIR:PATH=%s' % spec['fftw'].prefix
            ])
        else:
            options.append('-DENABLE_FFTW:BOOL=OFF')

        if '+cuda' in spec:
            options.extend([
                '-DENABLE_CUDA:BOOL=ON',
            ])

        if 'darwin' in spec.architecture:
            options.append('-DCMAKE_MACOSX_RPATH:BOOL=ON')

        return options
| 37.352113
| 178
| 0.601621
|
from spack import *
import sys
class NaluWind(CMakePackage):
homepage = "https://github.com/exawind/nalu-wind"
git = "https://github.com/exawind/nalu-wind.git"
maintainers = ['jrood-nrel']
tags = ['ecp', 'ecp-apps']
version('master', branch='master')
variant('shared', default=(sys.platform != 'darwin'),
description='Build dependencies as shared libraries')
variant('pic', default=True,
description='Position independent code')
variant('cuda', default=False,
description='Compile with CUDA support')
variant('openfast', default=False,
description='Compile with OpenFAST support')
variant('tioga', default=False,
description='Compile with Tioga support')
variant('hypre', default=False,
description='Compile with Hypre support')
variant('catalyst', default=False,
description='Compile with Catalyst support')
variant('fftw', default=False,
description='Compile with FFTW support')
depends_on('mpi')
depends_on('yaml-cpp@0.5.3:', when='+shared')
depends_on('yaml-cpp~shared@0.5.3:', when='~shared')
depends_on('trilinos+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='+shared')
depends_on('trilinos~shared+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='~shared')
depends_on('trilinos~shared+cuda+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='+cuda')
depends_on('openfast+cxx', when='+openfast+shared')
depends_on('openfast+cxx~shared', when='+openfast~shared')
depends_on('tioga', when='+tioga+shared')
depends_on('tioga~shared', when='+tioga~shared')
depends_on('hypre+mpi+int64', when='+hypre+shared')
depends_on('hypre+mpi+int64~shared', when='+hypre~shared')
depends_on('trilinos-catalyst-ioss-adapter', when='+catalyst')
depends_on('fftw+mpi', when='+fftw')
depends_on('cuda', when='+cuda')
def setup_environment(self, spack_env, run_env):
if '+cuda' in self.spec:
spack_env.set('NVCC_WRAPPER_DEFAULT_COMPILER', spack_cxx)
def cmake_args(self):
spec = self.spec
options = []
options.extend([
'-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
])
if '+cuda' in self.spec:
options.extend([
'-DCMAKE_CXX_COMPILER=%s' % join_path(self.spec['trilinos'].prefix, 'bin', 'nvcc_wrapper'),
])
else:
options.extend([
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
])
options.extend([
'-DTrilinos_DIR:PATH=%s' % spec['trilinos'].prefix,
'-DYAML_DIR:PATH=%s' % spec['yaml-cpp'].prefix,
'-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=%s' % (
'ON' if '+pic' in spec else 'OFF'),
])
if '+openfast' in spec:
options.extend([
'-DENABLE_OPENFAST:BOOL=ON',
'-DOpenFAST_DIR:PATH=%s' % spec['openfast'].prefix
])
else:
options.append('-DENABLE_OPENFAST:BOOL=OFF')
if '+tioga' in spec:
options.extend([
'-DENABLE_TIOGA:BOOL=ON',
'-DTIOGA_DIR:PATH=%s' % spec['tioga'].prefix
])
else:
options.append('-DENABLE_TIOGA:BOOL=OFF')
if '+hypre' in spec:
options.extend([
'-DENABLE_HYPRE:BOOL=ON',
'-DHYPRE_DIR:PATH=%s' % spec['hypre'].prefix
])
else:
options.append('-DENABLE_HYPRE:BOOL=OFF')
if '+catalyst' in spec:
options.extend([
'-DENABLE_PARAVIEW_CATALYST:BOOL=ON',
'-DPARAVIEW_CATALYST_INSTALL_PATH:PATH=%s' %
spec['trilinos-catalyst-ioss-adapter'].prefix
])
else:
options.append('-DENABLE_PARAVIEW_CATALYST:BOOL=OFF')
if '+fftw' in spec:
options.extend([
'-DENABLE_FFTW:BOOL=ON',
'-DFFTW_DIR:PATH=%s' % spec['fftw'].prefix
])
else:
options.append('-DENABLE_FFTW:BOOL=OFF')
if '+cuda' in spec:
options.extend([
'-DENABLE_CUDA:BOOL=ON',
])
if 'darwin' in spec.architecture:
options.append('-DCMAKE_MACOSX_RPATH:BOOL=ON')
return options
| true
| true
|
f7043bacf01e5c86f3a51de97e89c88870e9d8c2
| 202
|
py
|
Python
|
Kattis/ostgotska.py
|
ruidazeng/online-judge
|
6bdf8bbf1af885637dab474d0ccb58aff22a0933
|
[
"MIT"
] | null | null | null |
Kattis/ostgotska.py
|
ruidazeng/online-judge
|
6bdf8bbf1af885637dab474d0ccb58aff22a0933
|
[
"MIT"
] | null | null | null |
Kattis/ostgotska.py
|
ruidazeng/online-judge
|
6bdf8bbf1af885637dab474d0ccb58aff22a0933
|
[
"MIT"
] | 1
|
2020-06-22T21:07:24.000Z
|
2020-06-22T21:07:24.000Z
|
# Kattis "ostgotska": count words containing "ae"; if at least 40% of the
# words in the sentence contain it, the sentence is judged to be dialect.
words = input().split()
dialect_words = sum(1 for word in words if 'ae' in word)

if dialect_words / len(words) >= 0.4:
    print("dae ae ju traeligt va")
else:
    print("haer talar vi rikssvenska")
| 18.363636
| 38
| 0.59901
|
sentence = input().split()
ae = 0
for word in sentence:
if 'ae' in word:
ae += 1
if ae/len(sentence) >= 0.4:
print("dae ae ju traeligt va")
else:
print("haer talar vi rikssvenska")
| true
| true
|
f7043cb19891a8f93b1476ce2dcbdead32e526d8
| 117,170
|
py
|
Python
|
env/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py
|
lindamar/ecclesi
|
cad07fc78daf6facd1b74cc1cb1872aaf4771fa2
|
[
"MIT"
] | 168
|
2015-05-29T13:56:01.000Z
|
2022-02-17T07:38:17.000Z
|
env/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py
|
lindamar/ecclesi
|
cad07fc78daf6facd1b74cc1cb1872aaf4771fa2
|
[
"MIT"
] | 3,243
|
2017-02-07T15:30:01.000Z
|
2022-03-31T16:42:19.000Z
|
env/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py
|
lindamar/ecclesi
|
cad07fc78daf6facd1b74cc1cb1872aaf4771fa2
|
[
"MIT"
] | 210
|
2017-09-01T00:10:08.000Z
|
2022-03-19T18:05:12.000Z
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import with_metaclass, viewkeys, PY3
import types
try:
from collections import OrderedDict
except ImportError:
from pip._vendor.ordereddict import OrderedDict
from . import _inputstream
from . import _tokenizer
from . import treebuilders
from .treebuilders.base import Marker
from . import _utils
from .constants import (
spaceCharacters, asciiUpper2Lower,
specialElements, headingElements, cdataElements, rcdataElements,
tokenTypes, tagTokenTypes,
namespaces,
htmlIntegrationPointElements, mathmlTextIntegrationPointElements,
adjustForeignAttributes as adjustForeignAttributesMap,
adjustMathMLAttributes, adjustSVGAttributes,
E,
ReparseException
)
def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs):
    """Parse a string or file-like object into a tree"""
    builder_class = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder_class, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parse(doc, **kwargs)
def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs):
    """Parse a string or file-like object into a tree fragment, treating it
    as the content of a *container* element (default ``div``)."""
    tb = treebuilders.getTreeBuilder(treebuilder)
    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
    return p.parseFragment(doc, container=container, **kwargs)
def method_decorator_metaclass(function):
    """Return a metaclass that applies *function* as a decorator to every
    plain function defined in the class body."""
    class Decorated(type):
        def __new__(meta, classname, bases, classDict):
            wrapped = {}
            for key, value in classDict.items():
                if isinstance(value, types.FunctionType):
                    value = function(value)
                wrapped[key] = value
            return type.__new__(meta, classname, bases, wrapped)
    return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
    def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False):
        """
        strict - raise an exception when a parse error is encountered

        tree - a treebuilder class controlling the type of tree that will be
        returned. Built in treebuilders can be accessed through
        html5lib.treebuilders.getTreeBuilder(treeType)
        """

        # Raise an exception on the first error encountered
        self.strict = strict

        if tree is None:
            tree = treebuilders.getTreeBuilder("etree")
        self.tree = tree(namespaceHTMLElements)
        self.errors = []

        # One phase object per insertion mode; getPhases(debug) supplies the
        # phase classes (optionally instrumented when debug is true).
        self.phases = dict([(name, cls(self, self.tree)) for name, cls in
                            getPhases(debug).items()])
    def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):
        """Shared driver for parse()/parseFragment(): configure state, build
        the tokenizer for *stream*, and run the main loop, restarting once
        from a clean state if a ReparseException is raised."""
        self.innerHTMLMode = innerHTML
        self.container = container
        self.scripting = scripting
        self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs)
        self.reset()

        try:
            self.mainLoop()
        except ReparseException:
            # Lower layers request a full reparse; reset and run again.
            self.reset()
            self.mainLoop()
    def reset(self):
        """Reset tree, phase, and bookkeeping state so a (re)parse can start."""
        self.tree.reset()
        self.firstStartTag = False
        self.errors = []
        self.log = []  # only used with debug mode
        # "quirks" / "limited quirks" / "no quirks"
        self.compatMode = "no quirks"

        if self.innerHTMLMode:
            self.innerHTML = self.container.lower()

            # Pick the tokenizer start state demanded by the fragment's
            # container element.
            if self.innerHTML in cdataElements:
                self.tokenizer.state = self.tokenizer.rcdataState
            elif self.innerHTML in rcdataElements:
                self.tokenizer.state = self.tokenizer.rawtextState
            elif self.innerHTML == 'plaintext':
                self.tokenizer.state = self.tokenizer.plaintextState
            else:
                # state already is data state
                # self.tokenizer.state = self.tokenizer.dataState
                pass
            self.phase = self.phases["beforeHtml"]
            self.phase.insertHtmlElement()
            self.resetInsertionMode()
        else:
            self.innerHTML = False  # pylint:disable=redefined-variable-type
            self.phase = self.phases["initial"]

        self.lastPhase = None
        self.beforeRCDataPhase = None
        self.framesetOK = True
    @property
    def documentEncoding(self):
        """The name of the character encoding
        that was used to decode the input stream,
        or :obj:`None` if that is not determined yet.
        """
        # The tokenizer attribute only exists after _parse() has run.
        if not hasattr(self, 'tokenizer'):
            return None
        return self.tokenizer.stream.charEncoding[0].name
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
    def isMathMLTextIntegrationPoint(self, element):
        # Membership test against the constant table of MathML text
        # integration points, keyed by the (namespace, name) pair.
        return (element.namespace, element.name) in mathmlTextIntegrationPointElements
    def mainLoop(self):
        """Token pump: dispatch every normalized token to the current phase
        (or the foreign-content phase), then signal EOF to the phases."""
        CharactersToken = tokenTypes["Characters"]
        SpaceCharactersToken = tokenTypes["SpaceCharacters"]
        StartTagToken = tokenTypes["StartTag"]
        EndTagToken = tokenTypes["EndTag"]
        CommentToken = tokenTypes["Comment"]
        DoctypeToken = tokenTypes["Doctype"]
        ParseErrorToken = tokenTypes["ParseError"]

        for token in self.normalizedTokens():
            prev_token = None
            new_token = token
            # A phase may hand back a replacement token for reprocessing;
            # keep dispatching until the token is fully consumed (None).
            while new_token is not None:
                prev_token = new_token
                currentNode = self.tree.openElements[-1] if self.tree.openElements else None
                currentNodeNamespace = currentNode.namespace if currentNode else None
                currentNodeName = currentNode.name if currentNode else None

                type = new_token["type"]

                if type == ParseErrorToken:
                    self.parseError(new_token["data"], new_token.get("datavars", {}))
                    new_token = None
                else:
                    # Choose between the regular HTML phase and the foreign
                    # content phase based on the current node, per the tree
                    # construction dispatcher rules.
                    if (len(self.tree.openElements) == 0 or
                        currentNodeNamespace == self.tree.defaultNamespace or
                        (self.isMathMLTextIntegrationPoint(currentNode) and
                         ((type == StartTagToken and
                           token["name"] not in frozenset(["mglyph", "malignmark"])) or
                          type in (CharactersToken, SpaceCharactersToken))) or
                        (currentNodeNamespace == namespaces["mathml"] and
                         currentNodeName == "annotation-xml" and
                         type == StartTagToken and
                         token["name"] == "svg") or
                        (self.isHTMLIntegrationPoint(currentNode) and
                         type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
                        phase = self.phase
                    else:
                        phase = self.phases["inForeignContent"]

                    if type == CharactersToken:
                        new_token = phase.processCharacters(new_token)
                    elif type == SpaceCharactersToken:
                        new_token = phase.processSpaceCharacters(new_token)
                    elif type == StartTagToken:
                        new_token = phase.processStartTag(new_token)
                    elif type == EndTagToken:
                        new_token = phase.processEndTag(new_token)
                    elif type == CommentToken:
                        new_token = phase.processComment(new_token)
                    elif type == DoctypeToken:
                        new_token = phase.processDoctype(new_token)

            # A self-closing start tag that no phase acknowledged is an error.
            if (type == StartTagToken and prev_token["selfClosing"] and
                    not prev_token["selfClosingAcknowledged"]):
                self.parseError("non-void-element-with-trailing-solidus",
                                {"name": prev_token["name"]})

        # When the loop finishes it's EOF
        reprocess = True
        phases = []
        while reprocess:
            phases.append(self.phase)
            reprocess = self.phase.processEOF()
            if reprocess:
                # Each phase may only be visited once during EOF handling.
                assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
    def parse(self, stream, *args, **kwargs):
        """Parse a HTML document into a well-formed tree.

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element).

        scripting - treat noscript elements as if javascript was turned on

        Returns the tree builder's document object.
        """
        # innerHTML=False, container=None: full-document parsing mode.
        self._parse(stream, False, None, *args, **kwargs)
        return self.tree.getDocument()
    def parseFragment(self, stream, *args, **kwargs):
        """Parse a HTML fragment into a well-formed tree fragment.

        container - name of the element we're setting the innerHTML property
        if set to None, default to 'div'

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element).

        scripting - treat noscript elements as if javascript was turned on

        Returns the tree builder's fragment object.
        """
        # innerHTML=True: fragment parsing; container is taken from *args.
        self._parse(stream, True, *args, **kwargs)
        return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars=None):
# XXX The idea is to make errorcode mandatory.
if datavars is None:
datavars = {}
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError(E[errorcode] % datavars)
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
raw = token["data"]
token["data"] = OrderedDict(raw)
if len(raw) > len(token["data"]):
# we had some duplicated attribute, fix so first wins
token["data"].update(raw[::-1])
return token
    def adjustMathMLAttributes(self, token):
        # Fix the case of MathML attribute names on *token*.  The bare name
        # here resolves to the module-level replacement map, not this method
        # (the method name lives in the class namespace).
        adjust_attributes(token, adjustMathMLAttributes)
    def adjustSVGAttributes(self, token):
        # Fix the case of SVG attribute names on *token* (same name-resolution
        # note as above: the argument is the module-level map).
        adjust_attributes(token, adjustSVGAttributes)
    def adjustForeignAttributes(self, token):
        # Adjust namespaced (xlink:/xml:/xmlns) attributes on foreign-content
        # elements using the module-level adjustForeignAttributesMap.
        adjust_attributes(token, adjustForeignAttributesMap)
    def reparseTokenNormal(self, token):
        # pylint:disable=unused-argument
        # NOTE(review): this calls ``self.parser.phase()`` -- phases elsewhere
        # in this file are objects with process* methods rather than
        # callables, and the token argument is ignored, so this looks like
        # dead or vestigial code.  TODO confirm against callers before
        # relying on it.
        self.parser.phase()
    def resetInsertionMode(self):
        """Reset the parser's phase ("insertion mode") appropriately.

        Walks the stack of open elements from the innermost outwards and
        picks the phase matching the first recognized element, per the
        spec's "reset the insertion mode appropriately" algorithm.  Used
        for fragment parsing and after certain misnested-table fixups.
        """
        # The name of this method is mostly historical. (It's also used in the
        # specification.)
        last = False
        # Maps an element name to the phase that should handle content
        # inside it.
        newModes = {
            "select": "inSelect",
            "td": "inCell",
            "th": "inCell",
            "tr": "inRow",
            "tbody": "inTableBody",
            "thead": "inTableBody",
            "tfoot": "inTableBody",
            "caption": "inCaption",
            "colgroup": "inColumnGroup",
            "table": "inTable",
            "head": "inBody",
            "body": "inBody",
            "frameset": "inFrameset",
            "html": "beforeHead"
        }
        for node in self.tree.openElements[::-1]:
            nodeName = node.name
            new_phase = None
            if node == self.tree.openElements[0]:
                # Reached the root: in fragment parsing, pretend the root
                # is the fragment's context element.
                assert self.innerHTML
                last = True
                nodeName = self.innerHTML
            # Check for conditions that should only happen in the innerHTML
            # case
            if nodeName in ("select", "colgroup", "head", "html"):
                assert self.innerHTML
            # Foreign-namespace elements don't determine the insertion mode
            # (except for the fragment context element itself).
            if not last and node.namespace != self.tree.defaultNamespace:
                continue
            if nodeName in newModes:
                new_phase = self.phases[newModes[nodeName]]
                break
            elif last:
                # Nothing matched: fall back to "in body".
                new_phase = self.phases["inBody"]
                break
        self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
@_utils.memoize
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
try:
info = {"type": type_names[token['type']]}
except:
raise
if token['type'] in tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
    # pylint:disable=unused-argument
    class Phase(with_metaclass(getMetaclass(debug, log))):
        """Base class for helper object that implements each phase of processing.

        Subclasses override the process* methods; the defaults below
        implement the behavior shared by most phases.
        """
        def __init__(self, parser, tree):
            # parser - the owning HTMLParser; tree - the tree builder.
            self.parser = parser
            self.tree = tree
        def processEOF(self):
            # Every concrete phase must decide what EOF means for it.
            raise NotImplementedError
        def processComment(self, token):
            # For most phases the following is correct. Where it's not it will be
            # overridden.
            self.tree.insertComment(token, self.tree.openElements[-1])
        def processDoctype(self, token):
            # A DOCTYPE anywhere after the "initial" phase is an error.
            self.parser.parseError("unexpected-doctype")
        def processCharacters(self, token):
            self.tree.insertText(token["data"])
        def processSpaceCharacters(self, token):
            self.tree.insertText(token["data"])
        def processStartTag(self, token):
            # Dispatch on tag name via the phase's MethodDispatcher table.
            return self.startTagHandler[token["name"]](token)
        def startTagHtml(self, token):
            # Shared handling for a stray <html> start tag: merge its new
            # attributes onto the existing root element.
            if not self.parser.firstStartTag and token["name"] == "html":
                self.parser.parseError("non-html-root")
            # XXX Need a check here to see if the first start tag token emitted is
            # this token... If it's not, invoke self.parser.parseError().
            for attr, value in token["data"].items():
                if attr not in self.tree.openElements[0].attributes:
                    self.tree.openElements[0].attributes[attr] = value
            self.parser.firstStartTag = False
        def processEndTag(self, token):
            # Dispatch on tag name via the phase's MethodDispatcher table.
            return self.endTagHandler[token["name"]](token)
    class InitialPhase(Phase):
        """First phase: nothing seen yet.

        Consumes leading whitespace/comments and chooses the document
        compatibility mode ("no quirks", "limited quirks" or "quirks")
        from the DOCTYPE, if any.
        """
        def processSpaceCharacters(self, token):
            # Whitespace before the DOCTYPE is dropped.
            pass
        def processComment(self, token):
            # Comments before the DOCTYPE attach directly to the document.
            self.tree.insertComment(token, self.tree.document)
        def processDoctype(self, token):
            # Insert the doctype node and select the compat mode by matching
            # the public/system identifiers against the spec's legacy lists.
            name = token["name"]
            publicId = token["publicId"]
            systemId = token["systemId"]
            correct = token["correct"]
            if (name != "html" or publicId is not None or
                    systemId is not None and systemId != "about:legacy-compat"):
                self.parser.parseError("unknown-doctype")
            if publicId is None:
                publicId = ""
            self.tree.insertDoctype(token)
            if publicId != "":
                # Identifier matching is case-insensitive.
                publicId = publicId.translate(asciiUpper2Lower)
            # Full quirks mode: malformed doctype, or one of the legacy
            # public/system identifiers below.
            if (not correct or token["name"] != "html" or
                    publicId.startswith(
                        ("+//silmaril//dtd html pro v0r11 19970101//",
                         "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
                         "-//as//dtd html 3.0 aswedit + extensions//",
                         "-//ietf//dtd html 2.0 level 1//",
                         "-//ietf//dtd html 2.0 level 2//",
                         "-//ietf//dtd html 2.0 strict level 1//",
                         "-//ietf//dtd html 2.0 strict level 2//",
                         "-//ietf//dtd html 2.0 strict//",
                         "-//ietf//dtd html 2.0//",
                         "-//ietf//dtd html 2.1e//",
                         "-//ietf//dtd html 3.0//",
                         "-//ietf//dtd html 3.2 final//",
                         "-//ietf//dtd html 3.2//",
                         "-//ietf//dtd html 3//",
                         "-//ietf//dtd html level 0//",
                         "-//ietf//dtd html level 1//",
                         "-//ietf//dtd html level 2//",
                         "-//ietf//dtd html level 3//",
                         "-//ietf//dtd html strict level 0//",
                         "-//ietf//dtd html strict level 1//",
                         "-//ietf//dtd html strict level 2//",
                         "-//ietf//dtd html strict level 3//",
                         "-//ietf//dtd html strict//",
                         "-//ietf//dtd html//",
                         "-//metrius//dtd metrius presentational//",
                         "-//microsoft//dtd internet explorer 2.0 html strict//",
                         "-//microsoft//dtd internet explorer 2.0 html//",
                         "-//microsoft//dtd internet explorer 2.0 tables//",
                         "-//microsoft//dtd internet explorer 3.0 html strict//",
                         "-//microsoft//dtd internet explorer 3.0 html//",
                         "-//microsoft//dtd internet explorer 3.0 tables//",
                         "-//netscape comm. corp.//dtd html//",
                         "-//netscape comm. corp.//dtd strict html//",
                         "-//o'reilly and associates//dtd html 2.0//",
                         "-//o'reilly and associates//dtd html extended 1.0//",
                         "-//o'reilly and associates//dtd html extended relaxed 1.0//",
                         "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
                         "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
                         "-//spyglass//dtd html 2.0 extended//",
                         "-//sq//dtd html 2.0 hotmetal + extensions//",
                         "-//sun microsystems corp.//dtd hotjava html//",
                         "-//sun microsystems corp.//dtd hotjava strict html//",
                         "-//w3c//dtd html 3 1995-03-24//",
                         "-//w3c//dtd html 3.2 draft//",
                         "-//w3c//dtd html 3.2 final//",
                         "-//w3c//dtd html 3.2//",
                         "-//w3c//dtd html 3.2s draft//",
                         "-//w3c//dtd html 4.0 frameset//",
                         "-//w3c//dtd html 4.0 transitional//",
                         "-//w3c//dtd html experimental 19960712//",
                         "-//w3c//dtd html experimental 970421//",
                         "-//w3c//dtd w3 html//",
                         "-//w3o//dtd w3 html 3.0//",
                         "-//webtechs//dtd mozilla html 2.0//",
                         "-//webtechs//dtd mozilla html//")) or
                    publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
                                 "-/w3c/dtd html 4.0 transitional/en",
                                 "html") or
                    publicId.startswith(
                        ("-//w3c//dtd html 4.01 frameset//",
                         "-//w3c//dtd html 4.01 transitional//")) and
                    systemId is None or
                    systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
                self.parser.compatMode = "quirks"
            # Limited quirks mode: XHTML 1.0, or HTML 4.01 with a system id.
            elif (publicId.startswith(
                    ("-//w3c//dtd xhtml 1.0 frameset//",
                     "-//w3c//dtd xhtml 1.0 transitional//")) or
                  publicId.startswith(
                      ("-//w3c//dtd html 4.01 frameset//",
                       "-//w3c//dtd html 4.01 transitional//")) and
                  systemId is not None):
                self.parser.compatMode = "limited quirks"
            self.parser.phase = self.parser.phases["beforeHtml"]
        def anythingElse(self):
            # Any non-doctype content first means a quirks-mode document.
            self.parser.compatMode = "quirks"
            self.parser.phase = self.parser.phases["beforeHtml"]
        def processCharacters(self, token):
            self.parser.parseError("expected-doctype-but-got-chars")
            self.anythingElse()
            return token
        def processStartTag(self, token):
            self.parser.parseError("expected-doctype-but-got-start-tag",
                                   {"name": token["name"]})
            self.anythingElse()
            return token
        def processEndTag(self, token):
            self.parser.parseError("expected-doctype-but-got-end-tag",
                                   {"name": token["name"]})
            self.anythingElse()
            return token
        def processEOF(self):
            self.parser.parseError("expected-doctype-but-got-eof")
            self.anythingElse()
            # Reprocess EOF in the new phase.
            return True
    class BeforeHtmlPhase(Phase):
        """Phase before the root <html> element has been created."""
        # helper methods
        def insertHtmlElement(self):
            # Create the (possibly implied) root element and move on.
            self.tree.insertRoot(impliedTagToken("html", "StartTag"))
            self.parser.phase = self.parser.phases["beforeHead"]
        # other
        def processEOF(self):
            self.insertHtmlElement()
            # Reprocess EOF in the new phase.
            return True
        def processComment(self, token):
            self.tree.insertComment(token, self.tree.document)
        def processSpaceCharacters(self, token):
            # Whitespace before <html> is ignored.
            pass
        def processCharacters(self, token):
            self.insertHtmlElement()
            return token
        def processStartTag(self, token):
            if token["name"] == "html":
                self.parser.firstStartTag = True
            self.insertHtmlElement()
            return token
        def processEndTag(self, token):
            # Only </head>, </body>, </html>, </br> imply the root element;
            # anything else is an error and is dropped.
            if token["name"] not in ("head", "body", "html", "br"):
                self.parser.parseError("unexpected-end-tag-before-html",
                                       {"name": token["name"]})
            else:
                self.insertHtmlElement()
                return token
    class BeforeHeadPhase(Phase):
        """Phase between the root element and the <head> element."""
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            # Tag-name dispatch tables for this phase.
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("head", self.startTagHead)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                (("head", "body", "html", "br"), self.endTagImplyHead)
            ])
            self.endTagHandler.default = self.endTagOther
        def processEOF(self):
            # EOF implies an empty <head>; reprocess in the new phase.
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return True
        def processSpaceCharacters(self, token):
            # Whitespace here is ignored.
            pass
        def processCharacters(self, token):
            # Characters imply <head> and are reprocessed.
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return token
        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def startTagHead(self, token):
            self.tree.insertElement(token)
            # Remember the head element for later (e.g. startTagFromHead).
            self.tree.headPointer = self.tree.openElements[-1]
            self.parser.phase = self.parser.phases["inHead"]
        def startTagOther(self, token):
            # Any other start tag implies <head> first.
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return token
        def endTagImplyHead(self, token):
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return token
        def endTagOther(self, token):
            self.parser.parseError("end-tag-after-implied-root",
                                   {"name": token["name"]})
    class InHeadPhase(Phase):
        """Phase for content inside the <head> element."""
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            # Tag-name dispatch tables for this phase.
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("title", self.startTagTitle),
                (("noframes", "style"), self.startTagNoFramesStyle),
                ("noscript", self.startTagNoscript),
                ("script", self.startTagScript),
                (("base", "basefont", "bgsound", "command", "link"),
                 self.startTagBaseLinkCommand),
                ("meta", self.startTagMeta),
                ("head", self.startTagHead)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("head", self.endTagHead),
                (("br", "html", "body"), self.endTagHtmlBodyBr)
            ])
            self.endTagHandler.default = self.endTagOther
        # the real thing
        def processEOF(self):
            # EOF closes the head implicitly; reprocess in the new phase.
            self.anythingElse()
            return True
        def processCharacters(self, token):
            # Non-space characters end the head.
            self.anythingElse()
            return token
        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def startTagHead(self, token):
            self.parser.parseError("two-heads-are-not-better-than-one")
        def startTagBaseLinkCommand(self, token):
            # Void elements: insert, immediately pop, acknowledge "/>".
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
        def startTagMeta(self, token):
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
            attributes = token["data"]
            # A <meta charset=...> or http-equiv content-type may change the
            # stream encoding, but only while it is still "tentative".
            if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
                if "charset" in attributes:
                    self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
                elif ("content" in attributes and
                      "http-equiv" in attributes and
                      attributes["http-equiv"].lower() == "content-type"):
                    # Encoding it as UTF-8 here is a hack, as really we should pass
                    # the abstract Unicode string, and just use the
                    # ContentAttrParser on that, but using UTF-8 allows all chars
                    # to be encoded and as a ASCII-superset works.
                    data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
                    parser = _inputstream.ContentAttrParser(data)
                    codec = parser.parse()
                    self.parser.tokenizer.stream.changeEncoding(codec)
        def startTagTitle(self, token):
            # <title> content is RCDATA (entities allowed, no tags).
            self.parser.parseRCDataRawtext(token, "RCDATA")
        def startTagNoFramesStyle(self, token):
            # Need to decide whether to implement the scripting-disabled case
            self.parser.parseRCDataRawtext(token, "RAWTEXT")
        def startTagNoscript(self, token):
            # With scripting on, <noscript> content is raw text; otherwise it
            # is parsed in a dedicated phase.
            if self.parser.scripting:
                self.parser.parseRCDataRawtext(token, "RAWTEXT")
            else:
                self.tree.insertElement(token)
                self.parser.phase = self.parser.phases["inHeadNoscript"]
        def startTagScript(self, token):
            self.tree.insertElement(token)
            self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
            self.parser.originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["text"]
        def startTagOther(self, token):
            # Anything else closes the head and is reprocessed.
            self.anythingElse()
            return token
        def endTagHead(self, token):
            node = self.parser.tree.openElements.pop()
            assert node.name == "head", "Expected head got %s" % node.name
            self.parser.phase = self.parser.phases["afterHead"]
        def endTagHtmlBodyBr(self, token):
            self.anythingElse()
            return token
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def anythingElse(self):
            # Implicitly close the head element.
            self.endTagHead(impliedTagToken("head"))
    class InHeadNoscriptPhase(Phase):
        """Phase for <noscript> inside <head> when scripting is disabled."""
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            # Tag-name dispatch tables for this phase.
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand),
                (("head", "noscript"), self.startTagHeadNoscript),
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("noscript", self.endTagNoscript),
                ("br", self.endTagBr),
            ])
            self.endTagHandler.default = self.endTagOther
        def processEOF(self):
            # EOF inside <noscript> is an error; close it and reprocess.
            self.parser.parseError("eof-in-head-noscript")
            self.anythingElse()
            return True
        def processComment(self, token):
            return self.parser.phases["inHead"].processComment(token)
        def processCharacters(self, token):
            self.parser.parseError("char-in-head-noscript")
            self.anythingElse()
            return token
        def processSpaceCharacters(self, token):
            return self.parser.phases["inHead"].processSpaceCharacters(token)
        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def startTagBaseLinkCommand(self, token):
            # These head elements are legal here; delegate to "in head".
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagHeadNoscript(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
        def startTagOther(self, token):
            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
            self.anythingElse()
            return token
        def endTagNoscript(self, token):
            node = self.parser.tree.openElements.pop()
            assert node.name == "noscript", "Expected noscript got %s" % node.name
            self.parser.phase = self.parser.phases["inHead"]
        def endTagBr(self, token):
            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
            self.anythingElse()
            return token
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def anythingElse(self):
            # Caller must raise parse error first!
            self.endTagNoscript(impliedTagToken("noscript"))
    class AfterHeadPhase(Phase):
        """Phase between </head> and the <body>/<frameset> element."""
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            # Tag-name dispatch tables for this phase.
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("body", self.startTagBody),
                ("frameset", self.startTagFrameset),
                (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
                  "style", "title"),
                 self.startTagFromHead),
                ("head", self.startTagHead)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
                                                           self.endTagHtmlBodyBr)])
            self.endTagHandler.default = self.endTagOther
        def processEOF(self):
            # EOF implies an empty <body>; reprocess in the new phase.
            self.anythingElse()
            return True
        def processCharacters(self, token):
            self.anythingElse()
            return token
        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def startTagBody(self, token):
            self.parser.framesetOK = False
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inBody"]
        def startTagFrameset(self, token):
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inFrameset"]
        def startTagFromHead(self, token):
            # Head-only element after </head>: temporarily reopen the head,
            # process the tag there, then remove the head from the stack.
            self.parser.parseError("unexpected-start-tag-out-of-my-head",
                                   {"name": token["name"]})
            self.tree.openElements.append(self.tree.headPointer)
            self.parser.phases["inHead"].processStartTag(token)
            for node in self.tree.openElements[::-1]:
                if node.name == "head":
                    self.tree.openElements.remove(node)
                    break
        def startTagHead(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
        def startTagOther(self, token):
            self.anythingElse()
            return token
        def endTagHtmlBodyBr(self, token):
            self.anythingElse()
            return token
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def anythingElse(self):
            # Imply <body> and allow a later <frameset> to replace it.
            self.tree.insertElement(impliedTagToken("body", "StartTag"))
            self.parser.phase = self.parser.phases["inBody"]
            self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
        def __init__(self, parser, tree):
            # Build the start/end tag dispatch tables for "in body" — by far
            # the largest set of tag-specific rules in the parser.
            Phase.__init__(self, parser, tree)
            # Set this to the default handler
            self.processSpaceCharacters = self.processSpaceCharactersNonPre
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("base", "basefont", "bgsound", "command", "link", "meta",
                  "script", "style", "title"),
                 self.startTagProcessInHead),
                ("body", self.startTagBody),
                ("frameset", self.startTagFrameset),
                (("address", "article", "aside", "blockquote", "center", "details",
                  "dir", "div", "dl", "fieldset", "figcaption", "figure",
                  "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
                  "section", "summary", "ul"),
                 self.startTagCloseP),
                (headingElements, self.startTagHeading),
                (("pre", "listing"), self.startTagPreListing),
                ("form", self.startTagForm),
                (("li", "dd", "dt"), self.startTagListItem),
                ("plaintext", self.startTagPlaintext),
                ("a", self.startTagA),
                (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
                  "strong", "tt", "u"), self.startTagFormatting),
                ("nobr", self.startTagNobr),
                ("button", self.startTagButton),
                (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
                ("xmp", self.startTagXmp),
                ("table", self.startTagTable),
                (("area", "br", "embed", "img", "keygen", "wbr"),
                 self.startTagVoidFormatting),
                (("param", "source", "track"), self.startTagParamSource),
                ("input", self.startTagInput),
                ("hr", self.startTagHr),
                ("image", self.startTagImage),
                ("isindex", self.startTagIsIndex),
                ("textarea", self.startTagTextarea),
                ("iframe", self.startTagIFrame),
                ("noscript", self.startTagNoscript),
                (("noembed", "noframes"), self.startTagRawtext),
                ("select", self.startTagSelect),
                (("rp", "rt"), self.startTagRpRt),
                (("option", "optgroup"), self.startTagOpt),
                (("math"), self.startTagMath),
                (("svg"), self.startTagSvg),
                (("caption", "col", "colgroup", "frame", "head",
                  "tbody", "td", "tfoot", "th", "thead",
                  "tr"), self.startTagMisplaced)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("body", self.endTagBody),
                ("html", self.endTagHtml),
                (("address", "article", "aside", "blockquote", "button", "center",
                  "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
                  "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
                  "section", "summary", "ul"), self.endTagBlock),
                ("form", self.endTagForm),
                ("p", self.endTagP),
                (("dd", "dt", "li"), self.endTagListItem),
                (headingElements, self.endTagHeading),
                (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
                  "strike", "strong", "tt", "u"), self.endTagFormatting),
                (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
                ("br", self.endTagBr),
            ])
            self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
return (node1.name == node2.name and
node1.namespace == node2.namespace and
node1.attributes == node2.attributes)
        # helper
        def addFormattingElement(self, token):
            # Insert the element and push it on the list of active formatting
            # elements, applying the "Noah's Ark" clause: at most three
            # identical entries may exist after the last Marker.
            self.tree.insertElement(token)
            element = self.tree.openElements[-1]
            matchingElements = []
            for node in self.tree.activeFormattingElements[::-1]:
                if node is Marker:
                    break
                elif self.isMatchingFormattingElement(node, element):
                    matchingElements.append(node)
            assert len(matchingElements) <= 3
            if len(matchingElements) == 3:
                # Drop the earliest matching entry to keep the count at three.
                self.tree.activeFormattingElements.remove(matchingElements[-1])
            self.tree.activeFormattingElements.append(element)
        # the real deal
        def processEOF(self):
            # EOF is legal here only if every still-open element is one
            # whose end tag may be omitted; otherwise report an error.
            allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
                                          "tfoot", "th", "thead", "tr", "body",
                                          "html"))
            for node in self.tree.openElements[::-1]:
                if node.name not in allowed_elements:
                    self.parser.parseError("expected-closing-tag-but-got-eof")
                    break
            # Stop parsing
        def processSpaceCharactersDropNewline(self, token):
            # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
            # want to drop leading newlines
            data = token["data"]
            # Only the first run of whitespace gets this treatment; revert
            # to the normal handler immediately.
            self.processSpaceCharacters = self.processSpaceCharactersNonPre
            if (data.startswith("\n") and
                self.tree.openElements[-1].name in ("pre", "listing", "textarea") and
                    not self.tree.openElements[-1].hasContent()):
                data = data[1:]
            if data:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertText(data)
        def processCharacters(self, token):
            if token["data"] == "\u0000":
                # The tokenizer should always emit null on its own
                return
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertText(token["data"])
            # This must be bad for performance
            # Any non-space character disallows a later <frameset>.
            if (self.parser.framesetOK and
                any([char not in spaceCharacters
                     for char in token["data"]])):
                self.parser.framesetOK = False
        def processSpaceCharactersNonPre(self, token):
            # Default whitespace handling: just insert the text.
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertText(token["data"])
        def startTagProcessInHead(self, token):
            # Head-only elements in the body are handled by "in head" rules.
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagBody(self, token):
            # A second <body>: merge its new attributes onto the existing one.
            self.parser.parseError("unexpected-start-tag", {"name": "body"})
            if (len(self.tree.openElements) == 1 or
                    self.tree.openElements[1].name != "body"):
                assert self.parser.innerHTML
            else:
                self.parser.framesetOK = False
                for attr, value in token["data"].items():
                    if attr not in self.tree.openElements[1].attributes:
                        self.tree.openElements[1].attributes[attr] = value
        def startTagFrameset(self, token):
            # <frameset> after <body> replaces the body only while
            # framesetOK is still set.
            self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
            if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
                assert self.parser.innerHTML
            elif not self.parser.framesetOK:
                pass
            else:
                if self.tree.openElements[1].parent:
                    self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
                while self.tree.openElements[-1].name != "html":
                    self.tree.openElements.pop()
                self.tree.insertElement(token)
                self.parser.phase = self.parser.phases["inFrameset"]
        def startTagCloseP(self, token):
            # Block-level elements implicitly close an open <p> first.
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
        def startTagPreListing(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            # Drop a leading newline directly after <pre>/<listing>.
            self.processSpaceCharacters = self.processSpaceCharactersDropNewline
        def startTagForm(self, token):
            # Only one <form> may be open at a time (tracked by formPointer).
            if self.tree.formPointer:
                self.parser.parseError("unexpected-start-tag", {"name": "form"})
            else:
                if self.tree.elementInScope("p", variant="button"):
                    self.endTagP(impliedTagToken("p"))
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
        def startTagListItem(self, token):
            # <li>/<dd>/<dt> implicitly close a preceding sibling of the
            # appropriate kind before opening, per the spec's list algorithm.
            self.parser.framesetOK = False
            stopNamesMap = {"li": ["li"],
                            "dt": ["dt", "dd"],
                            "dd": ["dt", "dd"]}
            stopNames = stopNamesMap[token["name"]]
            for node in reversed(self.tree.openElements):
                if node.name in stopNames:
                    self.parser.phase.processEndTag(
                        impliedTagToken(node.name, "EndTag"))
                    break
                if (node.nameTuple in specialElements and
                        node.name not in ("address", "div", "p")):
                    # A special element (other than these three) ends the scan.
                    break
            if self.tree.elementInScope("p", variant="button"):
                self.parser.phase.processEndTag(
                    impliedTagToken("p", "EndTag"))
            self.tree.insertElement(token)
        def startTagPlaintext(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            # From here on the tokenizer emits everything as literal text.
            self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
        def startTagHeading(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            # A heading directly inside a heading closes the outer one.
            if self.tree.openElements[-1].name in headingElements:
                self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
                self.tree.openElements.pop()
            self.tree.insertElement(token)
        def startTagA(self, token):
            # An <a> inside an open <a> implies </a> first (adoption agency),
            # then any leftovers are removed from both bookkeeping lists.
            afeAElement = self.tree.elementInActiveFormattingElements("a")
            if afeAElement:
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "a", "endName": "a"})
                self.endTagFormatting(impliedTagToken("a"))
                if afeAElement in self.tree.openElements:
                    self.tree.openElements.remove(afeAElement)
                if afeAElement in self.tree.activeFormattingElements:
                    self.tree.activeFormattingElements.remove(afeAElement)
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagFormatting(self, token):
            # Generic formatting elements (<b>, <i>, ...): reconstruct the
            # active formatting list, then insert and register the element.
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagNobr(self, token):
            self.tree.reconstructActiveFormattingElements()
            # <nobr> inside <nobr> implies </nobr> first.
            if self.tree.elementInScope("nobr"):
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "nobr", "endName": "nobr"})
                self.processEndTag(impliedTagToken("nobr"))
                # XXX Need tests that trigger the following
                self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagButton(self, token):
            # Nested <button> implies </button> and reprocesses this token.
            if self.tree.elementInScope("button"):
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "button", "endName": "button"})
                self.processEndTag(impliedTagToken("button"))
                return token
            else:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertElement(token)
                self.parser.framesetOK = False
        def startTagAppletMarqueeObject(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            # A Marker isolates formatting elements inside these containers.
            self.tree.activeFormattingElements.append(Marker)
            self.parser.framesetOK = False
        def startTagXmp(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.reconstructActiveFormattingElements()
            self.parser.framesetOK = False
            self.parser.parseRCDataRawtext(token, "RAWTEXT")
        def startTagTable(self, token):
            # In quirks mode <table> does not close an open <p>.
            if self.parser.compatMode != "quirks":
                if self.tree.elementInScope("p", variant="button"):
                    self.processEndTag(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            self.parser.phase = self.parser.phases["inTable"]
        def startTagVoidFormatting(self, token):
            # Void elements (<br>, <img>, ...): insert, pop, acknowledge "/>".
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
            self.parser.framesetOK = False
        def startTagInput(self, token):
            framesetOK = self.parser.framesetOK
            self.startTagVoidFormatting(token)
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                # input type=hidden doesn't change framesetOK
                self.parser.framesetOK = framesetOK
        def startTagParamSource(self, token):
            # Void elements: insert, immediately pop, acknowledge "/>".
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
        def startTagHr(self, token):
            # <hr> closes an open <p>, then behaves as a void element.
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
            self.parser.framesetOK = False
        def startTagImage(self, token):
            # No really...
            # Legacy alias: <image> is reprocessed as <img>.
            self.parser.parseError("unexpected-start-tag-treated-as",
                                   {"originalName": "image", "newName": "img"})
            self.processStartTag(impliedTagToken("img", "StartTag",
                                                 attributes=token["data"],
                                                 selfClosing=token["selfClosing"]))
        def startTagIsIndex(self, token):
            # Deprecated <isindex>: expand into an equivalent
            # <form><hr><label>...<input></label><hr></form> structure.
            self.parser.parseError("deprecated-tag", {"name": "isindex"})
            if self.tree.formPointer:
                # Ignored entirely when a form is already open.
                return
            form_attrs = {}
            if "action" in token["data"]:
                form_attrs["action"] = token["data"]["action"]
            self.processStartTag(impliedTagToken("form", "StartTag",
                                                 attributes=form_attrs))
            self.processStartTag(impliedTagToken("hr", "StartTag"))
            self.processStartTag(impliedTagToken("label", "StartTag"))
            # XXX Localization ...
            if "prompt" in token["data"]:
                prompt = token["data"]["prompt"]
            else:
                prompt = "This is a searchable index. Enter search keywords: "
            self.processCharacters(
                {"type": tokenTypes["Characters"], "data": prompt})
            attributes = token["data"].copy()
            if "action" in attributes:
                del attributes["action"]
            if "prompt" in attributes:
                del attributes["prompt"]
            attributes["name"] = "isindex"
            self.processStartTag(impliedTagToken("input", "StartTag",
                                                 attributes=attributes,
                                                 selfClosing=token["selfClosing"]))
            self.processEndTag(impliedTagToken("label"))
            self.processStartTag(impliedTagToken("hr", "StartTag"))
            self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
        def startTagIFrame(self, token):
            # <iframe> content is parsed as raw text and blocks <frameset>.
            self.parser.framesetOK = False
            self.startTagRawtext(token)
        def startTagNoscript(self, token):
            # With scripting enabled <noscript> content is raw text; with
            # scripting disabled it is parsed as ordinary markup.
            if self.parser.scripting:
                self.startTagRawtext(token)
            else:
                self.startTagOther(token)
        def startTagRawtext(self, token):
            """iframe, noembed noframes, noscript(if scripting enabled)"""
            # Delegate to the shared RCDATA/RAWTEXT helper in RAWTEXT mode.
            self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
        def startTagSelect(self, token):
            # Insert the <select>, then pick the insertion mode: inside any
            # table-related phase use "inSelectInTable" so stray table tags
            # can still close the select; otherwise plain "inSelect".
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            if self.parser.phase in (self.parser.phases["inTable"],
                                     self.parser.phases["inCaption"],
                                     self.parser.phases["inColumnGroup"],
                                     self.parser.phases["inTableBody"],
                                     self.parser.phases["inRow"],
                                     self.parser.phases["inCell"]):
                self.parser.phase = self.parser.phases["inSelectInTable"]
            else:
                self.parser.phase = self.parser.phases["inSelect"]
        def startTagRpRt(self, token):
            # <rp>/<rt>: inside a <ruby>, generate implied end tags; if the
            # current node is then not the <ruby> itself, that is an error.
            if self.tree.elementInScope("ruby"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "ruby":
                    self.parser.parseError()
            self.tree.insertElement(token)
        def startTagMath(self, token):
            # Enter MathML foreign content: adjust attribute casing and
            # foreign (xlink/xml) attributes, then tag the token with the
            # MathML namespace before insertion.
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustMathMLAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["mathml"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
        def startTagSvg(self, token):
            # Enter SVG foreign content: adjust attribute casing and foreign
            # attributes, then tag the token with the SVG namespace.
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["svg"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
        def startTagMisplaced(self, token):
            """ Elements that should be children of other elements that have a
            different insertion mode; here they are ignored
            "caption", "col", "colgroup", "frame", "frameset", "head",
            "option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
            "tr", "noscript"
            """
            # Report and drop the token; no element is created.
            self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
        def startTagOther(self, token):
            # Default "in body" start-tag handling: reconstruct formatting
            # elements and insert the element as-is.
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
        def endTagP(self, token):
            # </p> with no <p> in button scope: synthesize an empty <p> so
            # the (erroneous) end tag still produces an element, then close
            # it. Otherwise pop up to and including the open <p>.
            if not self.tree.elementInScope("p", variant="button"):
                self.startTagCloseP(impliedTagToken("p", "StartTag"))
                self.parser.parseError("unexpected-end-tag", {"name": "p"})
                self.endTagP(impliedTagToken("p", "EndTag"))
            else:
                self.tree.generateImpliedEndTags("p")
                if self.tree.openElements[-1].name != "p":
                    self.parser.parseError("unexpected-end-tag", {"name": "p"})
                node = self.tree.openElements.pop()
                while node.name != "p":
                    node = self.tree.openElements.pop()
        def endTagBody(self, token):
            # </body>: ignored if <body> is not in scope; otherwise any
            # still-open element outside the allowed implied-end set is
            # reported, and the parser moves to the "afterBody" phase.
            # Note the open elements stack is NOT popped here.
            if not self.tree.elementInScope("body"):
                self.parser.parseError()
                return
            elif self.tree.openElements[-1].name != "body":
                for node in self.tree.openElements[2:]:
                    if node.name not in frozenset(("dd", "dt", "li", "optgroup",
                                                   "option", "p", "rp", "rt",
                                                   "tbody", "td", "tfoot",
                                                   "th", "thead", "tr", "body",
                                                   "html")):
                        # Not sure this is the correct name for the parse error
                        self.parser.parseError(
                            "expected-one-end-tag-but-got-another",
                            {"gotName": "body", "expectedName": node.name})
                        break
            self.parser.phase = self.parser.phases["afterBody"]
        def endTagHtml(self, token):
            # </html> acts as an implied </body>; the token is then
            # reprocessed (returned) in the new phase.
            # We repeat the test for the body end tag token being ignored here
            if self.tree.elementInScope("body"):
                self.endTagBody(impliedTagToken("body"))
                return token
        def endTagBlock(self, token):
            # Generic end tag for block-level elements: pop up to and
            # including the matching element when it is in scope.
            # Put us back in the right whitespace handling mode
            if token["name"] == "pre":
                self.processSpaceCharacters = self.processSpaceCharactersNonPre
            inScope = self.tree.elementInScope(token["name"])
            if inScope:
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if inScope:
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagForm(self, token):
            # </form>: the form pointer is always cleared. The element is
            # removed from the open elements stack (not popped!) only if the
            # pointer was set and the element is in scope.
            node = self.tree.formPointer
            self.tree.formPointer = None
            if node is None or not self.tree.elementInScope(node):
                self.parser.parseError("unexpected-end-tag",
                                       {"name": "form"})
            else:
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1] != node:
                    self.parser.parseError("end-tag-too-early-ignored",
                                           {"name": "form"})
                self.tree.openElements.remove(node)
        def endTagListItem(self, token):
            # </li> uses "list item" scope; </dd>/</dt> use normal scope.
            if token["name"] == "li":
                variant = "list"
            else:
                variant = None
            if not self.tree.elementInScope(token["name"], variant=variant):
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
            else:
                # The element itself is excluded from implied end tags so it
                # survives to be popped explicitly below.
                self.tree.generateImpliedEndTags(exclude=token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError(
                        "end-tag-too-early",
                        {"name": token["name"]})
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagHeading(self, token):
            # Any </h1>..</h6> closes the nearest open heading of ANY rank:
            # pop until an element from headingElements is removed.
            for item in headingElements:
                if self.tree.elementInScope(item):
                    self.tree.generateImpliedEndTags()
                    break
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            for item in headingElements:
                if self.tree.elementInScope(item):
                    item = self.tree.openElements.pop()
                    while item.name not in headingElements:
                        item = self.tree.openElements.pop()
                    break
        def endTagFormatting(self, token):
            """The much-feared adoption agency algorithm.

            Repairs mis-nested formatting elements (e.g. <b><p></b></p>) by
            cloning formatting elements and re-parenting their content. The
            "Step N" comments below follow the numbering of the referenced
            specification revision; do not reorder statements.
            """
            # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
            # XXX Better parseError messages appreciated.
            # Step 1
            outerLoopCounter = 0
            # Step 2
            while outerLoopCounter < 8:
                # Step 3
                outerLoopCounter += 1
                # Step 4:
                # Let the formatting element be the last element in
                # the list of active formatting elements that:
                # - is between the end of the list and the last scope
                # marker in the list, if any, or the start of the list
                # otherwise, and
                # - has the same tag name as the token.
                formattingElement = self.tree.elementInActiveFormattingElements(
                    token["name"])
                if (not formattingElement or
                    (formattingElement in self.tree.openElements and
                     not self.tree.elementInScope(formattingElement.name))):
                    # If there is no such node, then abort these steps
                    # and instead act as described in the "any other
                    # end tag" entry below.
                    self.endTagOther(token)
                    return
                # Otherwise, if there is such a node, but that node is
                # not in the stack of open elements, then this is a
                # parse error; remove the element from the list, and
                # abort these steps.
                elif formattingElement not in self.tree.openElements:
                    self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
                    self.tree.activeFormattingElements.remove(formattingElement)
                    return
                # Otherwise, if there is such a node, and that node is
                # also in the stack of open elements, but the element
                # is not in scope, then this is a parse error; ignore
                # the token, and abort these steps.
                elif not self.tree.elementInScope(formattingElement.name):
                    self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
                    return
                # Otherwise, there is a formatting element and that
                # element is in the stack and is in scope. If the
                # element is not the current node, this is a parse
                # error. In any case, proceed with the algorithm as
                # written in the following steps.
                else:
                    if formattingElement != self.tree.openElements[-1]:
                        self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
                # Step 5:
                # Let the furthest block be the topmost node in the
                # stack of open elements that is lower in the stack
                # than the formatting element, and is an element in
                # the special category. There might not be one.
                afeIndex = self.tree.openElements.index(formattingElement)
                furthestBlock = None
                for element in self.tree.openElements[afeIndex:]:
                    if element.nameTuple in specialElements:
                        furthestBlock = element
                        break
                # Step 6:
                # If there is no furthest block, then the UA must
                # first pop all the nodes from the bottom of the stack
                # of open elements, from the current node up to and
                # including the formatting element, then remove the
                # formatting element from the list of active
                # formatting elements, and finally abort these steps.
                if furthestBlock is None:
                    element = self.tree.openElements.pop()
                    while element != formattingElement:
                        element = self.tree.openElements.pop()
                    self.tree.activeFormattingElements.remove(element)
                    return
                # Step 7
                commonAncestor = self.tree.openElements[afeIndex - 1]
                # Step 8:
                # The bookmark is supposed to help us identify where to reinsert
                # nodes in step 15. We have to ensure that we reinsert nodes after
                # the node before the active formatting element. Note the bookmark
                # can move in step 9.7
                bookmark = self.tree.activeFormattingElements.index(formattingElement)
                # Step 9
                lastNode = node = furthestBlock
                innerLoopCounter = 0
                index = self.tree.openElements.index(node)
                while innerLoopCounter < 3:
                    innerLoopCounter += 1
                    # Node is element before node in open elements
                    index -= 1
                    node = self.tree.openElements[index]
                    if node not in self.tree.activeFormattingElements:
                        self.tree.openElements.remove(node)
                        continue
                    # Step 9.6
                    if node == formattingElement:
                        break
                    # Step 9.7
                    if lastNode == furthestBlock:
                        bookmark = self.tree.activeFormattingElements.index(node) + 1
                    # Step 9.8
                    clone = node.cloneNode()
                    # Replace node with clone
                    self.tree.activeFormattingElements[
                        self.tree.activeFormattingElements.index(node)] = clone
                    self.tree.openElements[
                        self.tree.openElements.index(node)] = clone
                    node = clone
                    # Step 9.9
                    # Remove lastNode from its parents, if any
                    if lastNode.parent:
                        lastNode.parent.removeChild(lastNode)
                    node.appendChild(lastNode)
                    # Step 9.10
                    lastNode = node
                # Step 10
                # Foster parent lastNode if commonAncestor is a
                # table, tbody, tfoot, thead, or tr we need to foster
                # parent the lastNode
                if lastNode.parent:
                    lastNode.parent.removeChild(lastNode)
                if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
                    parent, insertBefore = self.tree.getTableMisnestedNodePosition()
                    parent.insertBefore(lastNode, insertBefore)
                else:
                    commonAncestor.appendChild(lastNode)
                # Step 11
                clone = formattingElement.cloneNode()
                # Step 12
                furthestBlock.reparentChildren(clone)
                # Step 13
                furthestBlock.appendChild(clone)
                # Step 14
                self.tree.activeFormattingElements.remove(formattingElement)
                self.tree.activeFormattingElements.insert(bookmark, clone)
                # Step 15
                self.tree.openElements.remove(formattingElement)
                self.tree.openElements.insert(
                    self.tree.openElements.index(furthestBlock) + 1, clone)
        def endTagAppletMarqueeObject(self, token):
            # applet/marquee/object end tag: pop up to and including the
            # element if in scope, then clear formatting elements back to
            # the scope marker pushed by the matching start tag.
            if self.tree.elementInScope(token["name"]):
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if self.tree.elementInScope(token["name"]):
                element = self.tree.openElements.pop()
                while element.name != token["name"]:
                    element = self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
        def endTagBr(self, token):
            # </br> is invalid; per spec it is treated as a <br> start tag
            # (an empty element is inserted and immediately popped).
            self.parser.parseError("unexpected-end-tag-treated-as",
                                   {"originalName": "br", "newName": "br element"})
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(impliedTagToken("br", "StartTag"))
            self.tree.openElements.pop()
        def endTagOther(self, token):
            # "Any other end tag": walk the stack from the bottom; pop up to
            # a matching element, but stop (ignoring the token) as soon as a
            # special-category element is hit first.
            for node in self.tree.openElements[::-1]:
                if node.name == token["name"]:
                    self.tree.generateImpliedEndTags(exclude=token["name"])
                    if self.tree.openElements[-1].name != token["name"]:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                    while self.tree.openElements.pop() != node:
                        pass
                    break
                else:
                    if node.nameTuple in specialElements:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                        break
    class TextPhase(Phase):
        """Phase for RCDATA/RAWTEXT content (script, style, textarea, ...).

        All characters are inserted verbatim; the only meaningful end tag is
        </script>. On exit the parser returns to the phase that was active
        before the text mode was entered (parser.originalPhase).
        """
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("script", self.endTagScript)])
            self.endTagHandler.default = self.endTagOther
        def processCharacters(self, token):
            # Everything is literal text in this phase.
            self.tree.insertText(token["data"])
        def processEOF(self):
            # EOF inside raw text: report, close the element, reprocess EOF
            # in the original phase (signalled by returning True).
            self.parser.parseError("expected-named-closing-tag-but-got-eof",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
            return True
        def startTagOther(self, token):
            assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
        def endTagScript(self, token):
            node = self.tree.openElements.pop()
            assert node.name == "script"
            self.parser.phase = self.parser.originalPhase
            # The rest of this method is all stuff that only happens if
            # document.write works
        def endTagOther(self, token):
            # Any other end tag just closes the raw text element.
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
    class InTablePhase(Phase):
        """The "in table" insertion mode.

        Only table-structure tags are handled directly; character data is
        buffered via InTableTextPhase, and anything else is processed "in
        body" with foster parenting enabled (insertFromTable).
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("caption", self.startTagCaption),
                ("colgroup", self.startTagColgroup),
                ("col", self.startTagCol),
                (("tbody", "tfoot", "thead"), self.startTagRowGroup),
                (("td", "th", "tr"), self.startTagImplyTbody),
                ("table", self.startTagTable),
                (("style", "script"), self.startTagStyleScript),
                ("input", self.startTagInput),
                ("form", self.startTagForm)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "tbody", "td",
                  "tfoot", "th", "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods
        def clearStackToTableContext(self):
            # "clear the stack back to a table context"
            while self.tree.openElements[-1].name not in ("table", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #  {"name":  self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            # When the current node is <html> it's an innerHTML case
        # processing methods
        def processEOF(self):
            if self.tree.openElements[-1].name != "html":
                self.parser.parseError("eof-in-table")
            else:
                assert self.parser.innerHTML
            # Stop parsing
        def processSpaceCharacters(self, token):
            # Buffer character tokens in InTableTextPhase so whitespace-only
            # runs can stay in the table while other text is foster-parented.
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processSpaceCharacters(token)
        def processCharacters(self, token):
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processCharacters(token)
        def insertText(self, token):
            # If we get here there must be at least one non-whitespace character
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processCharacters(token)
            self.tree.insertFromTable = False
        def startTagCaption(self, token):
            self.clearStackToTableContext()
            # The Marker bounds formatting elements for the caption.
            self.tree.activeFormattingElements.append(Marker)
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCaption"]
        def startTagColgroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inColumnGroup"]
        def startTagCol(self, token):
            # <col> implies <colgroup>; reprocess the token there.
            self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
            return token
        def startTagRowGroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inTableBody"]
        def startTagImplyTbody(self, token):
            # <td>/<th>/<tr> imply <tbody>; reprocess the token there.
            self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
            return token
        def startTagTable(self, token):
            # Nested <table> implicitly closes the current one.
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "table", "endName": "table"})
            self.parser.phase.processEndTag(impliedTagToken("table"))
            if not self.parser.innerHTML:
                return token
        def startTagStyleScript(self, token):
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagInput(self, token):
            # Only hidden inputs may live directly inside a table.
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                self.parser.parseError("unexpected-hidden-input-in-table")
                self.tree.insertElement(token)
                # XXX associate with form
                self.tree.openElements.pop()
            else:
                self.startTagOther(token)
        def startTagForm(self, token):
            # A <form> in a table becomes an empty element (popped at once)
            # and only sets the form pointer if none is set yet.
            self.parser.parseError("unexpected-form-in-table")
            if self.tree.formPointer is None:
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
                self.tree.openElements.pop()
        def startTagOther(self, token):
            self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processStartTag(token)
            self.tree.insertFromTable = False
        def endTagTable(self, token):
            if self.tree.elementInScope("table", variant="table"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "table":
                    self.parser.parseError("end-tag-too-early-named",
                                           {"gotName": "table",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "table":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.parser.resetInsertionMode()
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processEndTag(token)
            self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
    class InCaptionPhase(Phase):
        """The "in caption" insertion mode.

        Behaves like "in body" except that table-structure tags implicitly
        close the caption and are then reprocessed by the table phase.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableElement)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("caption", self.endTagCaption),
                ("table", self.endTagTable),
                (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagCaption(self):
            # True when there is no <caption> in table scope (innerHTML case).
            return not self.tree.elementInScope("caption", variant="table")
        def processEOF(self):
            self.parser.phases["inBody"].processEOF()
        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)
        def startTagTableElement(self, token):
            self.parser.parseError()
            # XXX Have to duplicate logic here to find out if the tag is ignored
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token
        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def endTagCaption(self, token):
            if not self.ignoreEndTagCaption():
                # AT this code is quite similar to endTagTable in "InTable"
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "caption":
                    self.parser.parseError("expected-one-end-tag-but-got-another",
                                           {"gotName": "caption",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "caption":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                # Clear back to the Marker pushed by startTagCaption.
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inTable"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagTable(self, token):
            self.parser.parseError()
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
    class InColumnGroupPhase(Phase):
        """The "in column group" insertion mode.

        Only <col> is valid content; anything else implicitly closes the
        <colgroup> and is reprocessed in the table phase.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-column
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("col", self.startTagCol)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("colgroup", self.endTagColgroup),
                ("col", self.endTagCol)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagColgroup(self):
            # Current node being <html> means we are in an innerHTML fragment.
            return self.tree.openElements[-1].name == "html"
        def processEOF(self):
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML
                return
            else:
                ignoreEndTag = self.ignoreEndTagColgroup()
                self.endTagColgroup(impliedTagToken("colgroup"))
                if not ignoreEndTag:
                    return True
        def processCharacters(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def startTagCol(self, token):
            # <col> is a void element: insert, pop, acknowledge.
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
        def startTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def endTagColgroup(self, token):
            if self.ignoreEndTagColgroup():
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
            else:
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTable"]
        def endTagCol(self, token):
            # <col> has no end tag.
            self.parser.parseError("no-end-tag", {"name": "col"})
        def endTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
    class InTableBodyPhase(Phase):
        """The "in table body" insertion mode (tbody/tfoot/thead content).

        Rows are handled here; cells imply a <tr>; most other tokens are
        delegated to the "in table" phase.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("tr", self.startTagTr),
                (("td", "th"), self.startTagTableCell),
                (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
                 self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "td", "th",
                  "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods
        def clearStackToTableBodyContext(self):
            # Pop anything that is not a row group (or <html> for fragments).
            while self.tree.openElements[-1].name not in ("tbody", "tfoot",
                                                          "thead", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #  {"name": self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML
        # the rest
        def processEOF(self):
            self.parser.phases["inTable"].processEOF()
        def processSpaceCharacters(self, token):
            return self.parser.phases["inTable"].processSpaceCharacters(token)
        def processCharacters(self, token):
            return self.parser.phases["inTable"].processCharacters(token)
        def startTagTr(self, token):
            self.clearStackToTableBodyContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inRow"]
        def startTagTableCell(self, token):
            # A cell outside a row implies <tr>; reprocess the cell there.
            self.parser.parseError("unexpected-cell-in-table-body",
                                   {"name": token["name"]})
            self.startTagTr(impliedTagToken("tr", "StartTag"))
            return token
        def startTagTableOther(self, token):
            # XXX AT Any ideas on how to share this with endTagTable?
            if (self.tree.elementInScope("tbody", variant="table") or
                    self.tree.elementInScope("thead", variant="table") or
                    self.tree.elementInScope("tfoot", variant="table")):
                self.clearStackToTableBodyContext()
                self.endTagTableRowGroup(
                    impliedTagToken(self.tree.openElements[-1].name))
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def startTagOther(self, token):
            return self.parser.phases["inTable"].processStartTag(token)
        def endTagTableRowGroup(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.clearStackToTableBodyContext()
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTable"]
            else:
                self.parser.parseError("unexpected-end-tag-in-table-body",
                                       {"name": token["name"]})
        def endTagTable(self, token):
            # </table> closes the open row group first, then is reprocessed.
            if (self.tree.elementInScope("tbody", variant="table") or
                    self.tree.elementInScope("thead", variant="table") or
                    self.tree.elementInScope("tfoot", variant="table")):
                self.clearStackToTableBodyContext()
                self.endTagTableRowGroup(
                    impliedTagToken(self.tree.openElements[-1].name))
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag-in-table-body",
                                   {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inTable"].processEndTag(token)
    class InRowPhase(Phase):
        """The "in row" insertion mode (<tr> content).

        Cells open the "in cell" phase with a formatting-scope Marker; table
        structure tags implicitly close the row; everything else is
        delegated to the "in table" phase.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-row
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("td", "th"), self.startTagTableCell),
                (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
                  "tr"), self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("tr", self.endTagTr),
                ("table", self.endTagTable),
                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
                (("body", "caption", "col", "colgroup", "html", "td", "th"),
                 self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods (XXX unify this with other table helper methods)
        def clearStackToTableRowContext(self):
            while self.tree.openElements[-1].name not in ("tr", "html"):
                self.parser.parseError("unexpected-implied-end-tag-in-table-row",
                                       {"name": self.tree.openElements[-1].name})
                self.tree.openElements.pop()
        def ignoreEndTagTr(self):
            # True when no <tr> is in table scope (innerHTML case).
            return not self.tree.elementInScope("tr", variant="table")
        # the rest
        def processEOF(self):
            self.parser.phases["inTable"].processEOF()
        def processSpaceCharacters(self, token):
            return self.parser.phases["inTable"].processSpaceCharacters(token)
        def processCharacters(self, token):
            return self.parser.phases["inTable"].processCharacters(token)
        def startTagTableCell(self, token):
            self.clearStackToTableRowContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCell"]
            # Marker bounds the cell's formatting elements.
            self.tree.activeFormattingElements.append(Marker)
        def startTagTableOther(self, token):
            ignoreEndTag = self.ignoreEndTagTr()
            self.endTagTr(impliedTagToken("tr"))
            # XXX how are we sure it's always ignored in the innerHTML case?
            if not ignoreEndTag:
                return token
        def startTagOther(self, token):
            return self.parser.phases["inTable"].processStartTag(token)
        def endTagTr(self, token):
            if not self.ignoreEndTagTr():
                self.clearStackToTableRowContext()
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTableBody"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagTable(self, token):
            ignoreEndTag = self.ignoreEndTagTr()
            self.endTagTr(impliedTagToken("tr"))
            # Reprocess the current tag if the tr end tag was not ignored
            # XXX how are we sure it's always ignored in the innerHTML case?
            if not ignoreEndTag:
                return token
        def endTagTableRowGroup(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.endTagTr(impliedTagToken("tr"))
                return token
            else:
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag-in-table-row",
                                   {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inTable"].processEndTag(token)
    class InCellPhase(Phase):
        """The "in cell" insertion mode (<td>/<th> content).

        Mostly delegates to "in body"; table structure tags close the open
        cell first and are then reprocessed.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                (("td", "th"), self.endTagTableCell),
                (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
                (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper
        def closeCell(self):
            # Close whichever cell type is currently in table scope.
            if self.tree.elementInScope("td", variant="table"):
                self.endTagTableCell(impliedTagToken("td"))
            elif self.tree.elementInScope("th", variant="table"):
                self.endTagTableCell(impliedTagToken("th"))
        # the rest
        def processEOF(self):
            self.parser.phases["inBody"].processEOF()
        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)
        def startTagTableOther(self, token):
            if (self.tree.elementInScope("td", variant="table") or
                    self.tree.elementInScope("th", variant="table")):
                self.closeCell()
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def endTagTableCell(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.tree.generateImpliedEndTags(token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError("unexpected-cell-end-tag",
                                           {"name": token["name"]})
                    while True:
                        node = self.tree.openElements.pop()
                        if node.name == token["name"]:
                            break
                else:
                    self.tree.openElements.pop()
                # Clear back to the Marker pushed when the cell was opened.
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inRow"]
            else:
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagImply(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.closeCell()
                return token
            else:
                # sometimes innerHTML case
                self.parser.parseError()
        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
        def __init__(self, parser, tree):
            # Dispatch tables for the "in select" insertion mode.
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("option", self.startTagOption),
                ("optgroup", self.startTagOptgroup),
                ("select", self.startTagSelect),
                (("input", "keygen", "textarea"), self.startTagInput),
                ("script", self.startTagScript)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("option", self.endTagOption),
                ("optgroup", self.endTagOptgroup),
                ("select", self.endTagSelect)
            ])
            self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
        def processEOF(self):
            # EOF inside <select> is an error unless parsing a fragment.
            if self.tree.openElements[-1].name != "html":
                self.parser.parseError("eof-in-select")
            else:
                assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes </optgroup>
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
    """Phase for a <select> that is nested inside a table.

    Nearly everything is forwarded to the "inSelect" phase; the one
    difference is that table-structure tags force the <select> closed
    first and are then reprocessed.
    """

    # Table-structure tag names that break out of the open <select>.
    _TABLE_TAGS = ("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th")

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher(
            [(self._TABLE_TAGS, self.startTagTable)])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher(
            [(self._TABLE_TAGS, self.endTagTable)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # EOF handling is identical to the plain "inSelect" phase.
        self.parser.phases["inSelect"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inSelect"].processCharacters(token)

    def startTagTable(self, token):
        # A table-structure start tag closes the <select>, then the token
        # is reprocessed in the resulting phase.
        self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
        self.endTagOther(impliedTagToken("select"))
        return token

    def startTagOther(self, token):
        return self.parser.phases["inSelect"].processStartTag(token)

    def endTagTable(self, token):
        # A table-structure end tag closes the <select> only when the
        # corresponding element is actually in table scope.
        self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagOther(impliedTagToken("select"))
            return token
        return None

    def endTagOther(self, token):
        return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
    """Phase for content in a foreign (SVG or MathML) namespace."""

    # HTML elements that "break out" of foreign content back into the
    # regular HTML parsing rules.
    breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
                                  "center", "code", "dd", "div", "dl", "dt",
                                  "em", "embed", "h1", "h2", "h3",
                                  "h4", "h5", "h6", "head", "hr", "i", "img",
                                  "li", "listing", "menu", "meta", "nobr",
                                  "ol", "p", "pre", "ruby", "s", "small",
                                  "span", "strong", "strike", "sub", "sup",
                                  "table", "tt", "u", "ul", "var"])

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

    def adjustSVGTagNames(self, token):
        # SVG tag names are case-sensitive; restore the canonical camelCase
        # spelling that the tokenizer lowercased away.
        replacements = {"altglyph": "altGlyph",
                        "altglyphdef": "altGlyphDef",
                        "altglyphitem": "altGlyphItem",
                        "animatecolor": "animateColor",
                        "animatemotion": "animateMotion",
                        "animatetransform": "animateTransform",
                        "clippath": "clipPath",
                        "feblend": "feBlend",
                        "fecolormatrix": "feColorMatrix",
                        "fecomponenttransfer": "feComponentTransfer",
                        "fecomposite": "feComposite",
                        "feconvolvematrix": "feConvolveMatrix",
                        "fediffuselighting": "feDiffuseLighting",
                        "fedisplacementmap": "feDisplacementMap",
                        "fedistantlight": "feDistantLight",
                        "feflood": "feFlood",
                        "fefunca": "feFuncA",
                        "fefuncb": "feFuncB",
                        "fefuncg": "feFuncG",
                        "fefuncr": "feFuncR",
                        "fegaussianblur": "feGaussianBlur",
                        "feimage": "feImage",
                        "femerge": "feMerge",
                        "femergenode": "feMergeNode",
                        "femorphology": "feMorphology",
                        "feoffset": "feOffset",
                        "fepointlight": "fePointLight",
                        "fespecularlighting": "feSpecularLighting",
                        "fespotlight": "feSpotLight",
                        "fetile": "feTile",
                        "feturbulence": "feTurbulence",
                        "foreignobject": "foreignObject",
                        "glyphref": "glyphRef",
                        "lineargradient": "linearGradient",
                        "radialgradient": "radialGradient",
                        "textpath": "textPath"}

        if token["name"] in replacements:
            token["name"] = replacements[token["name"]]

    def processCharacters(self, token):
        # U+0000 becomes U+FFFD; any non-space character disables a later
        # switch to a <frameset> document.
        if token["data"] == "\u0000":
            token["data"] = "\uFFFD"
        elif (self.parser.framesetOK and
              any(char not in spaceCharacters for char in token["data"])):
            self.parser.framesetOK = False
        Phase.processCharacters(self, token)

    def processStartTag(self, token):
        currentNode = self.tree.openElements[-1]
        if (token["name"] in self.breakoutElements or
            (token["name"] == "font" and
             set(token["data"].keys()) & set(["color", "face", "size"]))):
            # Breakout tag: pop until we are back in HTML content (or at an
            # integration point), then reprocess the token there.
            self.parser.parseError("unexpected-html-element-in-foreign-content",
                                   {"name": token["name"]})
            while (self.tree.openElements[-1].namespace !=
                   self.tree.defaultNamespace and
                   not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
                   not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
                self.tree.openElements.pop()
            return token

        else:
            # Adjust names/attributes for the current foreign namespace and
            # insert the element there.
            if currentNode.namespace == namespaces["mathml"]:
                self.parser.adjustMathMLAttributes(token)
            elif currentNode.namespace == namespaces["svg"]:
                self.adjustSVGTagNames(token)
                self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = currentNode.namespace
            self.tree.insertElement(token)
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True

    def processEndTag(self, token):
        # Walk the open-elements stack from the top looking for a
        # case-insensitive name match; pop to it, or hand the token to the
        # regular phase once an HTML-namespace node is reached.
        nodeIndex = len(self.tree.openElements) - 1
        node = self.tree.openElements[-1]
        if node.name.translate(asciiUpper2Lower) != token["name"]:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        while True:
            if node.name.translate(asciiUpper2Lower) == token["name"]:
                # XXX this isn't in the spec but it seems necessary
                if self.parser.phase == self.parser.phases["inTableText"]:
                    self.parser.phase.flushCharacters()
                    self.parser.phase = self.parser.phase.originalPhase
                while self.tree.openElements.pop() != node:
                    assert self.tree.openElements
                new_token = None
                break
            nodeIndex -= 1

            node = self.tree.openElements[nodeIndex]
            if node.namespace != self.tree.defaultNamespace:
                continue
            else:
                new_token = self.parser.phase.processEndTag(token)
                break
        return new_token
class AfterBodyPhase(Phase):
    """Phase after </body> has been seen."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processComment(self, token):
        # This is needed because data is to be appended to the <html> element
        # here and not to whatever is currently open.
        self.tree.insertComment(token, self.tree.openElements[0])

    def processCharacters(self, token):
        # Stray characters reopen "in body" mode; reprocess the token there.
        self.parser.parseError("unexpected-char-after-body")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def endTagHtml(self, name):
        # NOTE: the parameter receives the token despite the historical
        # parameter name; it is unused here.
        if self.parser.innerHTML:
            self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
        else:
            self.parser.phase = self.parser.phases["afterAfterBody"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class InFramesetPhase(Phase):
    """Phase for content inside a <frameset>."""

    # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("frameset", self.startTagFrameset),
            ("frame", self.startTagFrame),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            ("frameset", self.endTagFrameset)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # EOF inside <frameset> is only legitimate in fragment parsing.
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-frameset")
        else:
            assert self.parser.innerHTML

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-in-frameset")

    def startTagFrameset(self, token):
        self.tree.insertElement(token)

    def startTagFrame(self, token):
        # <frame> is a void element: insert and immediately pop.
        self.tree.insertElement(token)
        self.tree.openElements.pop()

    def startTagNoframes(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-frameset",
                               {"name": token["name"]})

    def endTagFrameset(self, token):
        if self.tree.openElements[-1].name == "html":
            # innerHTML case
            self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
        else:
            self.tree.openElements.pop()
        if (not self.parser.innerHTML and
                self.tree.openElements[-1].name != "frameset"):
            # If we're not in innerHTML mode and the current node is not a
            # "frameset" element (anymore) then switch.
            self.parser.phase = self.parser.phases["afterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-frameset",
                               {"name": token["name"]})
class AfterFramesetPhase(Phase):
    """Phase after </frameset>."""

    # http://www.whatwg.org/specs/web-apps/current-work/#after3
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            ("html", self.endTagHtml)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-after-frameset")

    def startTagNoframes(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-frameset",
                               {"name": token["name"]})

    def endTagHtml(self, token):
        self.parser.phase = self.parser.phases["afterAfterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-frameset",
                               {"name": token["name"]})
class AfterAfterBodyPhase(Phase):
    """Phase after </html> in a normal (non-frameset) document."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments after the document are attached to the Document node.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        # Non-space characters reopen "in body" mode; reprocess there.
        self.parser.parseError("expected-eof-but-got-char")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class AfterAfterFramesetPhase(Phase):
    """Phase after </html> in a frameset document."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoFrames)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments after the document are attached to the Document node.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagNoFrames(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
# pylint:enable=unused-argument
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
"inHeadNoscript": InHeadNoscriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def adjust_attributes(token, replacements):
    """Rename the attributes of *token* in place using *replacements*.

    Names without an entry in *replacements* are kept unchanged; the
    token's attribute dict is rebuilt (order preserved) only when at least
    one name actually needs adjusting.
    """
    data = token['data']
    if PY3 or _utils.PY27:
        overlap = viewkeys(data) & viewkeys(replacements)
    else:
        # Older Python 2 lacks dict views; fall back to frozensets.
        overlap = frozenset(data) & frozenset(replacements)
    if overlap:
        token['data'] = OrderedDict(
            (replacements.get(name, name), value)
            for name, value in data.items())
def impliedTagToken(name, type="EndTag", attributes=None,
                    selfClosing=False):
    """Build a synthetic tag token for *name*.

    Used whenever the parser must act as though a tag had appeared in the
    input stream. ``type`` intentionally shadows the builtin so the public
    keyword-argument name stays stable for callers.
    """
    return {
        "type": tokenTypes[type],
        "name": name,
        "data": {} if attributes is None else attributes,
        "selfClosing": selfClosing,
    }
class ParseError(Exception):
    """Error in parsed document; raised only when the parser is strict."""
| 42.85662
| 116
| 0.547546
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import with_metaclass, viewkeys, PY3
import types
try:
from collections import OrderedDict
except ImportError:
from pip._vendor.ordereddict import OrderedDict
from . import _inputstream
from . import _tokenizer
from . import treebuilders
from .treebuilders.base import Marker
from . import _utils
from .constants import (
spaceCharacters, asciiUpper2Lower,
specialElements, headingElements, cdataElements, rcdataElements,
tokenTypes, tagTokenTypes,
namespaces,
htmlIntegrationPointElements, mathmlTextIntegrationPointElements,
adjustForeignAttributes as adjustForeignAttributesMap,
adjustMathMLAttributes, adjustSVGAttributes,
E,
ReparseException
)
def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs):
    """Parse an HTML document as a string or file-like object into a tree.

    :arg doc: the document to parse as a string or file-like object
    :arg treebuilder: the treebuilder to use when parsing
    :arg namespaceHTMLElements: whether or not to namespace HTML elements
    :returns: parsed tree
    """
    builder_class = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder_class, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parse(doc, **kwargs)
def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs):
    """Parse an HTML fragment as a string or file-like object into a tree.

    :arg doc: the fragment to parse as a string or file-like object
    :arg container: the container context to parse the fragment in
    :arg treebuilder: the treebuilder to use when parsing
    :arg namespaceHTMLElements: whether or not to namespace HTML elements
    :returns: parsed tree fragment
    """
    builder_class = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder_class, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parseFragment(doc, container=container, **kwargs)
def method_decorator_metaclass(function):
    """Return a metaclass that wraps every plain function attribute of a
    class body with *function* (used to inject debug logging)."""
    class Decorated(type):
        def __new__(meta, classname, bases, classDict):
            # Wrap only plain functions; leave every other attribute as-is.
            wrapped = {
                name: function(attr) if isinstance(attr, types.FunctionType) else attr
                for name, attr in classDict.items()
            }
            return type.__new__(meta, classname, bases, wrapped)
    return Decorated
class HTMLParser(object):
    """HTML parser.

    Generates a tree structure from a stream of (possibly malformed) HTML.
    """

    def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False):
        """
        :arg tree: a treebuilder class controlling the type of tree that will
            be returned; defaults to the "etree" treebuilder
        :arg strict: raise an exception when a parse error is encountered
        :arg namespaceHTMLElements: whether or not to namespace HTML elements
        :arg debug: whether or not to enable debug-mode logging
        """
        # Raise an exception on the first error encountered
        self.strict = strict

        if tree is None:
            tree = treebuilders.getTreeBuilder("etree")
        self.tree = tree(namespaceHTMLElements)
        self.errors = []

        self.phases = dict([(name, cls(self, self.tree)) for name, cls in
                            getPhases(debug).items()])

    def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):
        # Shared driver for parse() and parseFragment().
        self.innerHTMLMode = innerHTML
        self.container = container
        self.scripting = scripting
        self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs)
        self.reset()

        try:
            self.mainLoop()
        except ReparseException:
            # The input stream signalled a restart (e.g. encoding change):
            # reset all state and parse again from the beginning.
            self.reset()
            self.mainLoop()

    def reset(self):
        """Reset all parser state so the same instance can parse again."""
        self.tree.reset()
        self.firstStartTag = False
        self.errors = []
        # FIX: these two pairs of statements were fused onto single lines
        # ("self.log = [] self.compatMode = ..." and
        # "self.innerHTML = False self.phase = ..."), a syntax error.
        self.log = []  # only used with debug mode
        # "quirks" / "limited quirks" / "no quirks"
        self.compatMode = "no quirks"

        if self.innerHTMLMode:
            self.innerHTML = self.container.lower()

            # Fragment parsing: pre-seed the tokenizer state based on the
            # container element's content model.
            if self.innerHTML in cdataElements:
                self.tokenizer.state = self.tokenizer.rcdataState
            elif self.innerHTML in rcdataElements:
                self.tokenizer.state = self.tokenizer.rawtextState
            elif self.innerHTML == 'plaintext':
                self.tokenizer.state = self.tokenizer.plaintextState
            else:
                # state already is data state
                pass
            self.phase = self.phases["beforeHtml"]
            self.phase.insertHtmlElement()
            self.resetInsertionMode()
        else:
            self.innerHTML = False
            self.phase = self.phases["initial"]

        self.lastPhase = None
        self.beforeRCDataPhase = None
        self.framesetOK = True

    @property
    def documentEncoding(self):
        """Name of the character encoding used to decode the input stream,
        or ``None`` if no stream has been tokenized yet."""
        if not hasattr(self, 'tokenizer'):
            return None
        return self.tokenizer.stream.charEncoding[0].name

    def isHTMLIntegrationPoint(self, element):
        # An HTML integration point switches parsing from foreign content
        # back to the regular HTML rules.
        if (element.name == "annotation-xml" and
                element.namespace == namespaces["mathml"]):
            return ("encoding" in element.attributes and
                    element.attributes["encoding"].translate(
                        asciiUpper2Lower) in
                    ("text/html", "application/xhtml+xml"))
        else:
            return (element.namespace, element.name) in htmlIntegrationPointElements

    def isMathMLTextIntegrationPoint(self, element):
        return (element.namespace, element.name) in mathmlTextIntegrationPointElements

    def mainLoop(self):
        # Cache token-type constants; they are consulted for every token.
        CharactersToken = tokenTypes["Characters"]
        SpaceCharactersToken = tokenTypes["SpaceCharacters"]
        StartTagToken = tokenTypes["StartTag"]
        EndTagToken = tokenTypes["EndTag"]
        CommentToken = tokenTypes["Comment"]
        DoctypeToken = tokenTypes["Doctype"]
        ParseErrorToken = tokenTypes["ParseError"]

        for token in self.normalizedTokens():
            prev_token = None
            new_token = token
            # A handler may return the token to have it reprocessed by the
            # (possibly changed) current phase; loop until it is consumed.
            while new_token is not None:
                prev_token = new_token
                currentNode = self.tree.openElements[-1] if self.tree.openElements else None
                currentNodeNamespace = currentNode.namespace if currentNode else None
                currentNodeName = currentNode.name if currentNode else None

                type = new_token["type"]

                if type == ParseErrorToken:
                    self.parseError(new_token["data"], new_token.get("datavars", {}))
                    new_token = None
                else:
                    # Decide whether the token goes to the current phase or
                    # to the foreign-content (SVG/MathML) rules.
                    if (len(self.tree.openElements) == 0 or
                        currentNodeNamespace == self.tree.defaultNamespace or
                        (self.isMathMLTextIntegrationPoint(currentNode) and
                         ((type == StartTagToken and
                           token["name"] not in frozenset(["mglyph", "malignmark"])) or
                          type in (CharactersToken, SpaceCharactersToken))) or
                        (currentNodeNamespace == namespaces["mathml"] and
                         currentNodeName == "annotation-xml" and
                         type == StartTagToken and
                         token["name"] == "svg") or
                        (self.isHTMLIntegrationPoint(currentNode) and
                         type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
                        phase = self.phase
                    else:
                        phase = self.phases["inForeignContent"]

                    if type == CharactersToken:
                        new_token = phase.processCharacters(new_token)
                    elif type == SpaceCharactersToken:
                        new_token = phase.processSpaceCharacters(new_token)
                    elif type == StartTagToken:
                        new_token = phase.processStartTag(new_token)
                    elif type == EndTagToken:
                        new_token = phase.processEndTag(new_token)
                    elif type == CommentToken:
                        new_token = phase.processComment(new_token)
                    elif type == DoctypeToken:
                        new_token = phase.processDoctype(new_token)

            if (type == StartTagToken and prev_token["selfClosing"] and
                    not prev_token["selfClosingAcknowledged"]):
                self.parseError("non-void-element-with-trailing-solidus",
                                {"name": prev_token["name"]})

        # When the input is exhausted it's EOF time: let each phase process
        # EOF until one of them declines to reprocess.
        reprocess = True
        phases = []
        while reprocess:
            phases.append(self.phase)
            reprocess = self.phase.processEOF()
            if reprocess:
                assert self.phase not in phases

    def normalizedTokens(self):
        # Generator yielding each tokenizer token after normalization.
        for token in self.tokenizer:
            yield self.normalizeToken(token)

    def parse(self, stream, *args, **kwargs):
        """Parse a HTML document into a well-formed tree.

        :arg stream: a file-like object or string containing the HTML
        :returns: parsed tree
        """
        self._parse(stream, False, None, *args, **kwargs)
        return self.tree.getDocument()

    def parseFragment(self, stream, *args, **kwargs):
        """Parse a HTML fragment into a well-formed tree fragment.

        :arg stream: a file-like object or string containing the HTML
        :returns: parsed tree fragment
        """
        self._parse(stream, True, *args, **kwargs)
        return self.tree.getFragment()

    def parseError(self, errorcode="XXX-undefined-error", datavars=None):
        # XXX The idea is to make errorcode mandatory.
        if datavars is None:
            datavars = {}
        self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
        if self.strict:
            raise ParseError(E[errorcode] % datavars)

    def normalizeToken(self, token):
        # HTML5-specific normalizations of the token stream.
        if token["type"] == tokenTypes["StartTag"]:
            raw = token["data"]
            token["data"] = OrderedDict(raw)
            if len(raw) > len(token["data"]):
                # we had some duplicated attribute, fix so first wins
                token["data"].update(raw[::-1])

        return token

    def adjustMathMLAttributes(self, token):
        adjust_attributes(token, adjustMathMLAttributes)

    def adjustSVGAttributes(self, token):
        adjust_attributes(token, adjustSVGAttributes)

    def adjustForeignAttributes(self, token):
        adjust_attributes(token, adjustForeignAttributesMap)

    def reparseTokenNormal(self, token):
        # pylint:disable=unused-argument
        self.parser.phase()

    def resetInsertionMode(self):
        # The name of this method is mostly historical. (It's also used in
        # the specification.)
        last = False
        newModes = {
            "select": "inSelect",
            "td": "inCell",
            "th": "inCell",
            "tr": "inRow",
            "tbody": "inTableBody",
            "thead": "inTableBody",
            "tfoot": "inTableBody",
            "caption": "inCaption",
            "colgroup": "inColumnGroup",
            "table": "inTable",
            "head": "inBody",
            "body": "inBody",
            "frameset": "inFrameset",
            "html": "beforeHead"
        }
        for node in self.tree.openElements[::-1]:
            nodeName = node.name
            new_phase = None
            if node == self.tree.openElements[0]:
                # The bottom of the stack: only possible in fragment mode,
                # where the (lowercased) container name is used instead.
                assert self.innerHTML
                last = True
                nodeName = self.innerHTML
            # Check for conditions that should only happen in the
            # innerHTML case
            if nodeName in ("select", "colgroup", "head", "html"):
                assert self.innerHTML

            if not last and node.namespace != self.tree.defaultNamespace:
                continue

            if nodeName in newModes:
                new_phase = self.phases[newModes[nodeName]]
                break
            elif last:
                new_phase = self.phases["inBody"]
                break

        self.phase = new_phase

    def parseRCDataRawtext(self, token, contentType):
        # Generic RCDATA/RAWTEXT parsing: switch the tokenizer state and
        # enter the "text" phase, remembering where to return afterwards.
        assert contentType in ("RAWTEXT", "RCDATA")

        self.tree.insertElement(token)

        if contentType == "RAWTEXT":
            self.tokenizer.state = self.tokenizer.rawtextState
        else:
            self.tokenizer.state = self.tokenizer.rcdataState

        self.originalPhase = self.phase

        self.phase = self.phases["text"]
@_utils.memoize
def getPhases(debug):
def log(function):
    """Decorator that records each process* call in ``parser.log``.

    Logs (tokenizer state, parser phase, handler class, handler name,
    token info) for every token-processing method it wraps.
    """
    type_names = dict((value, key) for key, value in
                      tokenTypes.items())

    def wrapped(self, *args, **kwargs):
        if function.__name__.startswith("process") and len(args) > 0:
            token = args[0]
            try:
                info = {"type": type_names[token['type']]}
            except:  # noqa: E722 -- re-raised immediately; kept for debugging
                raise
            if token['type'] in tagTokenTypes:
                info["name"] = token['name']

            self.parser.log.append((self.parser.tokenizer.state.__name__,
                                    self.parser.phase.__class__.__name__,
                                    self.__class__.__name__,
                                    function.__name__,
                                    info))
            return function(self, *args, **kwargs)
        else:
            return function(self, *args, **kwargs)
    return wrapped
def getMetaclass(use_metaclass, metaclass_func):
    """Pick the Phase metaclass: a method-decorating one in debug mode,
    plain ``type`` otherwise."""
    return method_decorator_metaclass(metaclass_func) if use_metaclass else type
class Phase(with_metaclass(getMetaclass(debug, log))):
    """Base class for helper objects implementing each parsing phase.

    Handler methods return either ``None`` (token fully consumed) or a
    token that should be reprocessed by the (possibly changed) phase.
    """

    def __init__(self, parser, tree):
        self.parser = parser
        self.tree = tree

    def processEOF(self):
        raise NotImplementedError

    def processComment(self, token):
        # For most phases the following is correct; phases needing other
        # behaviour override this (hence "overridden").
        self.tree.insertComment(token, self.tree.openElements[-1])

    def processDoctype(self, token):
        self.parser.parseError("unexpected-doctype")

    def processCharacters(self, token):
        self.tree.insertText(token["data"])

    def processSpaceCharacters(self, token):
        self.tree.insertText(token["data"])

    def processStartTag(self, token):
        return self.startTagHandler[token["name"]](token)

    def startTagHtml(self, token):
        if not self.parser.firstStartTag and token["name"] == "html":
            self.parser.parseError("non-html-root")
        # XXX Need a check here to see if the first start tag token emitted is
        # this token... If it's not, invoke self.parser.parseError().
        for attr, value in token["data"].items():
            if attr not in self.tree.openElements[0].attributes:
                self.tree.openElements[0].attributes[attr] = value
        self.parser.firstStartTag = False

    def processEndTag(self, token):
        return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
    """Phase before the doctype has been seen.

    Determines the document's compatibility ("quirks") mode from the
    doctype, then hands off to the "beforeHtml" phase.
    """

    def processSpaceCharacters(self, token):
        pass

    def processComment(self, token):
        self.tree.insertComment(token, self.tree.document)

    def processDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        correct = token["correct"]

        if (name != "html" or publicId is not None or
                systemId is not None and systemId != "about:legacy-compat"):
            self.parser.parseError("unknown-doctype")

        if publicId is None:
            publicId = ""

        self.tree.insertDoctype(token)

        if publicId != "":
            publicId = publicId.translate(asciiUpper2Lower)

        # The long public-identifier prefix lists below select full
        # quirks mode / limited quirks mode.
        if (not correct or token["name"] != "html" or
                publicId.startswith(
                    ("+//silmaril//dtd html pro v0r11 19970101//",
                     "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
                     "-//as//dtd html 3.0 aswedit + extensions//",
                     "-//ietf//dtd html 2.0 level 1//",
                     "-//ietf//dtd html 2.0 level 2//",
                     "-//ietf//dtd html 2.0 strict level 1//",
                     "-//ietf//dtd html 2.0 strict level 2//",
                     "-//ietf//dtd html 2.0 strict//",
                     "-//ietf//dtd html 2.0//",
                     "-//ietf//dtd html 2.1e//",
                     "-//ietf//dtd html 3.0//",
                     "-//ietf//dtd html 3.2 final//",
                     "-//ietf//dtd html 3.2//",
                     "-//ietf//dtd html 3//",
                     "-//ietf//dtd html level 0//",
                     "-//ietf//dtd html level 1//",
                     "-//ietf//dtd html level 2//",
                     "-//ietf//dtd html level 3//",
                     "-//ietf//dtd html strict level 0//",
                     "-//ietf//dtd html strict level 1//",
                     "-//ietf//dtd html strict level 2//",
                     "-//ietf//dtd html strict level 3//",
                     "-//ietf//dtd html strict//",
                     "-//ietf//dtd html//",
                     "-//metrius//dtd metrius presentational//",
                     "-//microsoft//dtd internet explorer 2.0 html strict//",
                     "-//microsoft//dtd internet explorer 2.0 html//",
                     "-//microsoft//dtd internet explorer 2.0 tables//",
                     "-//microsoft//dtd internet explorer 3.0 html strict//",
                     "-//microsoft//dtd internet explorer 3.0 html//",
                     "-//microsoft//dtd internet explorer 3.0 tables//",
                     "-//netscape comm. corp.//dtd html//",
                     "-//netscape comm. corp.//dtd strict html//",
                     "-//o'reilly and associates//dtd html 2.0//",
                     "-//o'reilly and associates//dtd html extended 1.0//",
                     "-//o'reilly and associates//dtd html extended relaxed 1.0//",
                     "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
                     "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
                     "-//spyglass//dtd html 2.0 extended//",
                     "-//sq//dtd html 2.0 hotmetal + extensions//",
                     "-//sun microsystems corp.//dtd hotjava html//",
                     "-//sun microsystems corp.//dtd hotjava strict html//",
                     "-//w3c//dtd html 3 1995-03-24//",
                     "-//w3c//dtd html 3.2 draft//",
                     "-//w3c//dtd html 3.2 final//",
                     "-//w3c//dtd html 3.2//",
                     "-//w3c//dtd html 3.2s draft//",
                     "-//w3c//dtd html 4.0 frameset//",
                     "-//w3c//dtd html 4.0 transitional//",
                     "-//w3c//dtd html experimental 19960712//",
                     "-//w3c//dtd html experimental 970421//",
                     "-//w3c//dtd w3 html//",
                     "-//w3o//dtd w3 html 3.0//",
                     "-//webtechs//dtd mozilla html 2.0//",
                     "-//webtechs//dtd mozilla html//")) or
                publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
                             "-/w3c/dtd html 4.0 transitional/en",
                             "html") or
                publicId.startswith(
                    ("-//w3c//dtd html 4.01 frameset//",
                     "-//w3c//dtd html 4.01 transitional//")) and
                systemId is None or
                systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
            self.parser.compatMode = "quirks"
        elif (publicId.startswith(
                ("-//w3c//dtd xhtml 1.0 frameset//",
                 "-//w3c//dtd xhtml 1.0 transitional//")) or
                publicId.startswith(
                    ("-//w3c//dtd html 4.01 frameset//",
                     "-//w3c//dtd html 4.01 transitional//")) and
                systemId is not None):
            self.parser.compatMode = "limited quirks"

        self.parser.phase = self.parser.phases["beforeHtml"]

    def anythingElse(self):
        # Missing doctype: the document is parsed in quirks mode.
        self.parser.compatMode = "quirks"
        self.parser.phase = self.parser.phases["beforeHtml"]

    def processCharacters(self, token):
        self.parser.parseError("expected-doctype-but-got-chars")
        self.anythingElse()
        return token

    def processStartTag(self, token):
        self.parser.parseError("expected-doctype-but-got-start-tag",
                               {"name": token["name"]})
        self.anythingElse()
        return token

    def processEndTag(self, token):
        self.parser.parseError("expected-doctype-but-got-end-tag",
                               {"name": token["name"]})
        self.anythingElse()
        return token

    def processEOF(self):
        self.parser.parseError("expected-doctype-but-got-eof")
        self.anythingElse()
        return True
class BeforeHtmlPhase(Phase):
    """Phase before the root <html> element has been created."""

    # helper methods
    def insertHtmlElement(self):
        # Create the implied <html> root and move to "beforeHead".
        self.tree.insertRoot(impliedTagToken("html", "StartTag"))
        self.parser.phase = self.parser.phases["beforeHead"]

    # other
    def processEOF(self):
        self.insertHtmlElement()
        return True

    def processComment(self, token):
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        pass

    def processCharacters(self, token):
        self.insertHtmlElement()
        return token

    def processStartTag(self, token):
        if token["name"] == "html":
            self.parser.firstStartTag = True
        self.insertHtmlElement()
        return token

    def processEndTag(self, token):
        # Only </head>, </body>, </html>, </br> imply the root; everything
        # else is a parse error and is ignored.
        if token["name"] not in ("head", "body", "html", "br"):
            self.parser.parseError("unexpected-end-tag-before-html",
                                   {"name": token["name"]})
        else:
            self.insertHtmlElement()
            return token
class BeforeHeadPhase(Phase):
    """Phase after <html> but before <head> has been opened."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("head", self.startTagHead)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            (("head", "body", "html", "br"), self.endTagImplyHead)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Imply <head>, then have EOF reprocessed in the new phase.
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return True

    def processSpaceCharacters(self, token):
        pass

    def processCharacters(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagHead(self, token):
        self.tree.insertElement(token)
        self.tree.headPointer = self.tree.openElements[-1]
        self.parser.phase = self.parser.phases["inHead"]

    def startTagOther(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return token

    def endTagImplyHead(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return token

    def endTagOther(self, token):
        self.parser.parseError("end-tag-after-implied-root",
                               {"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
    Phase.__init__(self, parser, tree)

    # Dispatch tables for tags handled specially while in <head>.
    self.startTagHandler = _utils.MethodDispatcher([
        ("html", self.startTagHtml),
        ("title", self.startTagTitle),
        (("noframes", "style"), self.startTagNoFramesStyle),
        ("noscript", self.startTagNoscript),
        ("script", self.startTagScript),
        (("base", "basefont", "bgsound", "command", "link"),
         self.startTagBaseLinkCommand),
        ("meta", self.startTagMeta),
        ("head", self.startTagHead)
    ])
    self.startTagHandler.default = self.startTagOther

    self.endTagHandler = _utils.MethodDispatcher([
        ("head", self.endTagHead),
        (("br", "html", "body"), self.endTagHtmlBodyBr)
    ])
    self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
    # Fall through to the class's anythingElse() fallback (defined on this
    # class, not in view here) and signal that EOF should be reprocessed.
    self.anythingElse()
    return True
def processCharacters(self, token):
    # Character data does not belong in <head>: fall through to the
    # generic fallback and reprocess the token afterwards.
    self.anythingElse()
    return token
def startTagHtml(self, token):
    # Delegate <html> to the "inBody" rules.
    return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
    # A second <head> start tag is a parse error and is ignored.
    self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and as a ASCII-superset works.
data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = _inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagNoscript(self, token):
if self.parser.scripting:
self.parser.parseRCDataRawtext(token, "RAWTEXT")
else:
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inHeadNoscript"]
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
    class InHeadNoscriptPhase(Phase):
        """Insertion mode "in head noscript": inside <noscript> in <head>
        when scripting is disabled."""
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand),
                (("head", "noscript"), self.startTagHeadNoscript),
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("noscript", self.endTagNoscript),
                ("br", self.endTagBr),
            ])
            self.endTagHandler.default = self.endTagOther
        def processEOF(self):
            # EOF here is an error; close the noscript and reprocess EOF.
            self.parser.parseError("eof-in-head-noscript")
            self.anythingElse()
            return True
        def processComment(self, token):
            return self.parser.phases["inHead"].processComment(token)
        def processCharacters(self, token):
            # Text closes the noscript implicitly; reprocess the token.
            self.parser.parseError("char-in-head-noscript")
            self.anythingElse()
            return token
        def processSpaceCharacters(self, token):
            return self.parser.phases["inHead"].processSpaceCharacters(token)
        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def startTagBaseLinkCommand(self, token):
            # Metadata elements are handled exactly as in the "in head" phase.
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagHeadNoscript(self, token):
            # Nested <head>/<noscript> are parse errors and ignored.
            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
        def startTagOther(self, token):
            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
            self.anythingElse()
            return token
        def endTagNoscript(self, token):
            node = self.parser.tree.openElements.pop()
            assert node.name == "noscript", "Expected noscript got %s" % node.name
            self.parser.phase = self.parser.phases["inHead"]
        def endTagBr(self, token):
            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
            self.anythingElse()
            return token
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def anythingElse(self):
            # Caller must raise parse error first!
            self.endTagNoscript(impliedTagToken("noscript"))
    class AfterHeadPhase(Phase):
        """Insertion mode "after head": between </head> and <body>."""
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("body", self.startTagBody),
                ("frameset", self.startTagFrameset),
                (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
                  "style", "title"),
                 self.startTagFromHead),
                ("head", self.startTagHead)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
                                                           self.endTagHtmlBodyBr)])
            self.endTagHandler.default = self.endTagOther
        def processEOF(self):
            # Synthesize <body>, then reprocess EOF in "in body".
            self.anythingElse()
            return True
        def processCharacters(self, token):
            self.anythingElse()
            return token
        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def startTagBody(self, token):
            self.parser.framesetOK = False
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inBody"]
        def startTagFrameset(self, token):
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inFrameset"]
        def startTagFromHead(self, token):
            # Metadata element after </head>: report it, temporarily push the
            # head element back on the stack, process the tag with the
            # "in head" rules, then take the head off the stack again.
            self.parser.parseError("unexpected-start-tag-out-of-my-head",
                                   {"name": token["name"]})
            self.tree.openElements.append(self.tree.headPointer)
            self.parser.phases["inHead"].processStartTag(token)
            for node in self.tree.openElements[::-1]:
                if node.name == "head":
                    self.tree.openElements.remove(node)
                    break
        def startTagHead(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
        def startTagOther(self, token):
            self.anythingElse()
            return token
        def endTagHtmlBodyBr(self, token):
            self.anythingElse()
            return token
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def anythingElse(self):
            # Insert an implied <body> and continue in "in body"; a <frameset>
            # is still allowed at this point, so framesetOK stays True.
            self.tree.insertElement(impliedTagToken("body", "StartTag"))
            self.parser.phase = self.parser.phases["inBody"]
            self.parser.framesetOK = True
    class InBodyPhase(Phase):
        # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
        # the really-really-really-very crazy mode
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            # Set this to the default handler
            self.processSpaceCharacters = self.processSpaceCharactersNonPre
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("base", "basefont", "bgsound", "command", "link", "meta",
                  "script", "style", "title"),
                 self.startTagProcessInHead),
                ("body", self.startTagBody),
                ("frameset", self.startTagFrameset),
                (("address", "article", "aside", "blockquote", "center", "details",
                  "dir", "div", "dl", "fieldset", "figcaption", "figure",
                  "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
                  "section", "summary", "ul"),
                 self.startTagCloseP),
                (headingElements, self.startTagHeading),
                (("pre", "listing"), self.startTagPreListing),
                ("form", self.startTagForm),
                (("li", "dd", "dt"), self.startTagListItem),
                ("plaintext", self.startTagPlaintext),
                ("a", self.startTagA),
                (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
                  "strong", "tt", "u"), self.startTagFormatting),
                ("nobr", self.startTagNobr),
                ("button", self.startTagButton),
                (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
                ("xmp", self.startTagXmp),
                ("table", self.startTagTable),
                (("area", "br", "embed", "img", "keygen", "wbr"),
                 self.startTagVoidFormatting),
                (("param", "source", "track"), self.startTagParamSource),
                ("input", self.startTagInput),
                ("hr", self.startTagHr),
                ("image", self.startTagImage),
                ("isindex", self.startTagIsIndex),
                ("textarea", self.startTagTextarea),
                ("iframe", self.startTagIFrame),
                ("noscript", self.startTagNoscript),
                (("noembed", "noframes"), self.startTagRawtext),
                ("select", self.startTagSelect),
                (("rp", "rt"), self.startTagRpRt),
                (("option", "optgroup"), self.startTagOpt),
                (("math"), self.startTagMath),
                (("svg"), self.startTagSvg),
                (("caption", "col", "colgroup", "frame", "head",
                  "tbody", "td", "tfoot", "th", "thead",
                  "tr"), self.startTagMisplaced)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("body", self.endTagBody),
                ("html", self.endTagHtml),
                (("address", "article", "aside", "blockquote", "button", "center",
                  "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
                  "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
                  "section", "summary", "ul"), self.endTagBlock),
                ("form", self.endTagForm),
                ("p", self.endTagP),
                (("dd", "dt", "li"), self.endTagListItem),
                (headingElements, self.endTagHeading),
                (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
                  "strike", "strong", "tt", "u"), self.endTagFormatting),
                (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
                ("br", self.endTagBr),
            ])
            self.endTagHandler.default = self.endTagOther
        def isMatchingFormattingElement(self, node1, node2):
            # Two elements "match" for the Noah's Ark clause when name,
            # namespace and attributes are all equal.
            return (node1.name == node2.name and
                    node1.namespace == node2.namespace and
                    node1.attributes == node2.attributes)
        # helper
        def addFormattingElement(self, token):
            # Insert the element and push it onto the list of active formatting
            # elements, enforcing the Noah's Ark clause: at most three matching
            # entries since the last marker.
            self.tree.insertElement(token)
            element = self.tree.openElements[-1]
            matchingElements = []
            for node in self.tree.activeFormattingElements[::-1]:
                if node is Marker:
                    break
                elif self.isMatchingFormattingElement(node, element):
                    matchingElements.append(node)
            assert len(matchingElements) <= 3
            if len(matchingElements) == 3:
                self.tree.activeFormattingElements.remove(matchingElements[-1])
            self.tree.activeFormattingElements.append(element)
        # the real deal
        def processEOF(self):
            # EOF is an error unless only elements that may be left open
            # remain on the stack.
            allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
                                          "tfoot", "th", "thead", "tr", "body",
                                          "html"))
            for node in self.tree.openElements[::-1]:
                if node.name not in allowed_elements:
                    self.parser.parseError("expected-closing-tag-but-got-eof")
                    break
            # Stop parsing
        def processSpaceCharactersDropNewline(self, token):
            # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
            # want to drop leading newlines
            data = token["data"]
            self.processSpaceCharacters = self.processSpaceCharactersNonPre
            if (data.startswith("\n") and
                self.tree.openElements[-1].name in ("pre", "listing", "textarea") and
                    not self.tree.openElements[-1].hasContent()):
                data = data[1:]
            if data:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertText(data)
        def processCharacters(self, token):
            if token["data"] == "\u0000":
                # The tokenizer should always emit null on its own
                return
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertText(token["data"])
            # This must be bad for performance
            if (self.parser.framesetOK and
                any([char not in spaceCharacters
                     for char in token["data"]])):
                # Non-space text means a <frameset> is no longer allowed.
                self.parser.framesetOK = False
        def processSpaceCharactersNonPre(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertText(token["data"])
        def startTagProcessInHead(self, token):
            # Metadata elements are still handled by the "in head" rules.
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagBody(self, token):
            # A second <body>: merge its attributes into the existing body.
            self.parser.parseError("unexpected-start-tag", {"name": "body"})
            if (len(self.tree.openElements) == 1 or
                    self.tree.openElements[1].name != "body"):
                assert self.parser.innerHTML
            else:
                self.parser.framesetOK = False
                for attr, value in token["data"].items():
                    if attr not in self.tree.openElements[1].attributes:
                        self.tree.openElements[1].attributes[attr] = value
        def startTagFrameset(self, token):
            # <frameset> may still replace the body if framesetOK is set.
            self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
            if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
                assert self.parser.innerHTML
            elif not self.parser.framesetOK:
                pass
            else:
                if self.tree.openElements[1].parent:
                    self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
                while self.tree.openElements[-1].name != "html":
                    self.tree.openElements.pop()
                self.tree.insertElement(token)
                self.parser.phase = self.parser.phases["inFrameset"]
        def startTagCloseP(self, token):
            # Block-level element: first close any <p> in button scope.
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
        def startTagPreListing(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            # A newline immediately after <pre>/<listing> is dropped.
            self.processSpaceCharacters = self.processSpaceCharactersDropNewline
        def startTagForm(self, token):
            if self.tree.formPointer:
                # Only one <form> may be open at a time.
                self.parser.parseError("unexpected-start-tag", {"name": "form"})
            else:
                if self.tree.elementInScope("p", variant="button"):
                    self.endTagP(impliedTagToken("p"))
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
        def startTagListItem(self, token):
            self.parser.framesetOK = False
            # A new list item implicitly closes a matching open one.
            stopNamesMap = {"li": ["li"],
                            "dt": ["dt", "dd"],
                            "dd": ["dt", "dd"]}
            stopNames = stopNamesMap[token["name"]]
            for node in reversed(self.tree.openElements):
                if node.name in stopNames:
                    self.parser.phase.processEndTag(
                        impliedTagToken(node.name, "EndTag"))
                    break
                if (node.nameTuple in specialElements and
                        node.name not in ("address", "div", "p")):
                    break
            if self.tree.elementInScope("p", variant="button"):
                self.parser.phase.processEndTag(
                    impliedTagToken("p", "EndTag"))
            self.tree.insertElement(token)
        def startTagPlaintext(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            # plaintext mode is irreversible: the rest of the input is text.
            self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
        def startTagHeading(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            if self.tree.openElements[-1].name in headingElements:
                # A heading inside a heading closes the outer one.
                self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
                self.tree.openElements.pop()
            self.tree.insertElement(token)
        def startTagA(self, token):
            # An <a> inside an open <a> first runs the adoption agency on the
            # outer one, then removes any leftovers from both lists.
            afeAElement = self.tree.elementInActiveFormattingElements("a")
            if afeAElement:
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "a", "endName": "a"})
                self.endTagFormatting(impliedTagToken("a"))
                if afeAElement in self.tree.openElements:
                    self.tree.openElements.remove(afeAElement)
                if afeAElement in self.tree.activeFormattingElements:
                    self.tree.activeFormattingElements.remove(afeAElement)
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagFormatting(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagNobr(self, token):
            self.tree.reconstructActiveFormattingElements()
            if self.tree.elementInScope("nobr"):
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "nobr", "endName": "nobr"})
                self.processEndTag(impliedTagToken("nobr"))
                # XXX Need tests that trigger the following
                self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagButton(self, token):
            if self.tree.elementInScope("button"):
                # An open <button> is closed first; reprocess this token.
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "button", "endName": "button"})
                self.processEndTag(impliedTagToken("button"))
                return token
            else:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertElement(token)
                self.parser.framesetOK = False
        def startTagAppletMarqueeObject(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            # Marker scopes the active formatting elements inside the element.
            self.tree.activeFormattingElements.append(Marker)
            self.parser.framesetOK = False
        def startTagXmp(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.reconstructActiveFormattingElements()
            self.parser.framesetOK = False
            self.parser.parseRCDataRawtext(token, "RAWTEXT")
        def startTagTable(self, token):
            # In quirks mode a <table> does not close an open <p>.
            if self.parser.compatMode != "quirks":
                if self.tree.elementInScope("p", variant="button"):
                    self.processEndTag(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            self.parser.phase = self.parser.phases["inTable"]
        def startTagVoidFormatting(self, token):
            # Void elements (area, br, embed, img, keygen, wbr).
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
            self.parser.framesetOK = False
        def startTagInput(self, token):
            framesetOK = self.parser.framesetOK
            self.startTagVoidFormatting(token)
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                # input type=hidden doesn't change framesetOK
                self.parser.framesetOK = framesetOK
        def startTagParamSource(self, token):
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
        def startTagHr(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
            self.parser.framesetOK = False
        def startTagImage(self, token):
            # <image> is treated as <img>.
            self.parser.parseError("unexpected-start-tag-treated-as",
                                   {"originalName": "image", "newName": "img"})
            self.processStartTag(impliedTagToken("img", "StartTag",
                                                 attributes=token["data"],
                                                 selfClosing=token["selfClosing"]))
        def startTagIsIndex(self, token):
            # Legacy <isindex> expands into form > hr, label(+prompt text),
            # input name=isindex, hr.
            self.parser.parseError("deprecated-tag", {"name": "isindex"})
            if self.tree.formPointer:
                return
            form_attrs = {}
            if "action" in token["data"]:
                form_attrs["action"] = token["data"]["action"]
            self.processStartTag(impliedTagToken("form", "StartTag",
                                                 attributes=form_attrs))
            self.processStartTag(impliedTagToken("hr", "StartTag"))
            self.processStartTag(impliedTagToken("label", "StartTag"))
            if "prompt" in token["data"]:
                prompt = token["data"]["prompt"]
            else:
                prompt = "This is a searchable index. Enter search keywords: "
            self.processCharacters(
                {"type": tokenTypes["Characters"], "data": prompt})
            attributes = token["data"].copy()
            if "action" in attributes:
                del attributes["action"]
            if "prompt" in attributes:
                del attributes["prompt"]
            attributes["name"] = "isindex"
            self.processStartTag(impliedTagToken("input", "StartTag",
                                                 attributes=attributes,
                                                 selfClosing=token["selfClosing"]))
            self.processEndTag(impliedTagToken("label"))
            self.processStartTag(impliedTagToken("hr", "StartTag"))
            self.processEndTag(impliedTagToken("form"))
        def startTagTextarea(self, token):
            self.tree.insertElement(token)
            self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
            # A newline immediately after <textarea> is dropped.
            self.processSpaceCharacters = self.processSpaceCharactersDropNewline
            self.parser.framesetOK = False
        def startTagIFrame(self, token):
            self.parser.framesetOK = False
            self.startTagRawtext(token)
        def startTagNoscript(self, token):
            # <noscript> is raw text only when scripting is enabled.
            if self.parser.scripting:
                self.startTagRawtext(token)
            else:
                self.startTagOther(token)
        def startTagRawtext(self, token):
            # Generic RAWTEXT elements (noembed, noframes, iframe, ...).
            self.parser.parseRCDataRawtext(token, "RAWTEXT")
        def startTagOpt(self, token):
            if self.tree.openElements[-1].name == "option":
                self.parser.phase.processEndTag(impliedTagToken("option"))
            self.tree.reconstructActiveFormattingElements()
            self.parser.tree.insertElement(token)
        def startTagSelect(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            # A <select> inside table content gets its own insertion mode.
            if self.parser.phase in (self.parser.phases["inTable"],
                                     self.parser.phases["inCaption"],
                                     self.parser.phases["inColumnGroup"],
                                     self.parser.phases["inTableBody"],
                                     self.parser.phases["inRow"],
                                     self.parser.phases["inCell"]):
                self.parser.phase = self.parser.phases["inSelectInTable"]
            else:
                self.parser.phase = self.parser.phases["inSelect"]
        def startTagRpRt(self, token):
            if self.tree.elementInScope("ruby"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "ruby":
                    self.parser.parseError()
            self.tree.insertElement(token)
        def startTagMath(self, token):
            # Foreign content: adjust attribute names/namespaces for MathML.
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustMathMLAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["mathml"]
            self.tree.insertElement(token)
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
        def startTagSvg(self, token):
            # Foreign content: adjust attribute names/namespaces for SVG.
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["svg"]
            self.tree.insertElement(token)
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
        def startTagMisplaced(self, token):
            """Elements that should be children of other elements that have a
            different insertion mode; here they are ignored
            "caption", "col", "colgroup", "frame", "head",
            "option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
            "tr", "noscript"
            """
            self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
        def startTagOther(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
        def endTagP(self, token):
            if not self.tree.elementInScope("p", variant="button"):
                # No <p> in button scope: synthesize one so there is something
                # to close, then recurse to close it.
                self.startTagCloseP(impliedTagToken("p", "StartTag"))
                self.parser.parseError("unexpected-end-tag", {"name": "p"})
                self.endTagP(impliedTagToken("p", "EndTag"))
            else:
                self.tree.generateImpliedEndTags("p")
                if self.tree.openElements[-1].name != "p":
                    self.parser.parseError("unexpected-end-tag", {"name": "p"})
                node = self.tree.openElements.pop()
                while node.name != "p":
                    node = self.tree.openElements.pop()
        def endTagBody(self, token):
            if not self.tree.elementInScope("body"):
                self.parser.parseError()
                return
            elif self.tree.openElements[-1].name != "body":
                for node in self.tree.openElements[2:]:
                    if node.name not in frozenset(("dd", "dt", "li", "optgroup",
                                                   "option", "p", "rp", "rt",
                                                   "tbody", "td", "tfoot",
                                                   "th", "thead", "tr", "body",
                                                   "html")):
                        self.parser.parseError(
                            "expected-one-end-tag-but-got-another",
                            {"gotName": "body", "expectedName": node.name})
                        break
            self.parser.phase = self.parser.phases["afterBody"]
        def endTagHtml(self, token):
            # Close the body first, then reprocess </html> in "after body".
            if self.tree.elementInScope("body"):
                self.endTagBody(impliedTagToken("body"))
                return token
        def endTagBlock(self, token):
            # Leaving <pre> restores the default whitespace handling.
            if token["name"] == "pre":
                self.processSpaceCharacters = self.processSpaceCharactersNonPre
            inScope = self.tree.elementInScope(token["name"])
            if inScope:
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if inScope:
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagForm(self, token):
            node = self.tree.formPointer
            self.tree.formPointer = None
            if node is None or not self.tree.elementInScope(node):
                self.parser.parseError("unexpected-end-tag",
                                       {"name": "form"})
            else:
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1] != node:
                    self.parser.parseError("end-tag-too-early-ignored",
                                           {"name": "form"})
                self.tree.openElements.remove(node)
        def endTagListItem(self, token):
            # </li> matches in list-item scope; </dd> and </dt> in the
            # default scope.
            if token["name"] == "li":
                variant = "list"
            else:
                variant = None
            if not self.tree.elementInScope(token["name"], variant=variant):
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
            else:
                self.tree.generateImpliedEndTags(exclude=token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError(
                        "end-tag-too-early",
                        {"name": token["name"]})
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagHeading(self, token):
            # Any heading end tag closes whichever heading is open.
            for item in headingElements:
                if self.tree.elementInScope(item):
                    self.tree.generateImpliedEndTags()
                    break
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            for item in headingElements:
                if self.tree.elementInScope(item):
                    item = self.tree.openElements.pop()
                    while item.name not in headingElements:
                        item = self.tree.openElements.pop()
                    break
        def endTagFormatting(self, token):
            """The much-feared adoption agency algorithm."""
            # Repairs misnested formatting elements by cloning and
            # re-parenting; runs the outer loop at most 8 times and the inner
            # loop at most 3 times per the spec.
            outerLoopCounter = 0
            while outerLoopCounter < 8:
                outerLoopCounter += 1
                # Find the formatting element this end tag refers to.
                formattingElement = self.tree.elementInActiveFormattingElements(
                    token["name"])
                if (not formattingElement or
                    (formattingElement in self.tree.openElements and
                     not self.tree.elementInScope(formattingElement.name))):
                    # If there is no such node, abort these steps and instead
                    # act as described in the "any other end tag" entry below.
                    self.endTagOther(token)
                    return
                elif formattingElement not in self.tree.openElements:
                    self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
                    self.tree.activeFormattingElements.remove(formattingElement)
                    return
                elif not self.tree.elementInScope(formattingElement.name):
                    self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
                    return
                else:
                    if formattingElement != self.tree.openElements[-1]:
                        self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
                # The "furthest block" is the topmost special element below
                # the formatting element on the stack.
                afeIndex = self.tree.openElements.index(formattingElement)
                furthestBlock = None
                for element in self.tree.openElements[afeIndex:]:
                    if element.nameTuple in specialElements:
                        furthestBlock = element
                        break
                if furthestBlock is None:
                    # No furthest block: simply pop up to and including the
                    # formatting element and drop it from the active list.
                    element = self.tree.openElements.pop()
                    while element != formattingElement:
                        element = self.tree.openElements.pop()
                    self.tree.activeFormattingElements.remove(element)
                    return
                commonAncestor = self.tree.openElements[afeIndex - 1]
                # Bookmark notes where the clone will be re-inserted in the
                # active formatting elements list.
                bookmark = self.tree.activeFormattingElements.index(formattingElement)
                lastNode = node = furthestBlock
                innerLoopCounter = 0
                index = self.tree.openElements.index(node)
                while innerLoopCounter < 3:
                    innerLoopCounter += 1
                    # Walk up the stack from the furthest block.
                    index -= 1
                    node = self.tree.openElements[index]
                    if node not in self.tree.activeFormattingElements:
                        self.tree.openElements.remove(node)
                        continue
                    if node == formattingElement:
                        break
                    if lastNode == furthestBlock:
                        bookmark = self.tree.activeFormattingElements.index(node) + 1
                    # Replace the entry in both lists with a clone.
                    clone = node.cloneNode()
                    self.tree.activeFormattingElements[
                        self.tree.activeFormattingElements.index(node)] = clone
                    self.tree.openElements[
                        self.tree.openElements.index(node)] = clone
                    node = clone
                    if lastNode.parent:
                        lastNode.parent.removeChild(lastNode)
                    node.appendChild(lastNode)
                    lastNode = node
                if lastNode.parent:
                    lastNode.parent.removeChild(lastNode)
                if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
                    # Foster-parent when the common ancestor is table content.
                    parent, insertBefore = self.tree.getTableMisnestedNodePosition()
                    parent.insertBefore(lastNode, insertBefore)
                else:
                    commonAncestor.appendChild(lastNode)
                # Move the furthest block's children into a clone of the
                # formatting element, and splice the clone into both lists.
                clone = formattingElement.cloneNode()
                furthestBlock.reparentChildren(clone)
                furthestBlock.appendChild(clone)
                self.tree.activeFormattingElements.remove(formattingElement)
                self.tree.activeFormattingElements.insert(bookmark, clone)
                self.tree.openElements.remove(formattingElement)
                self.tree.openElements.insert(
                    self.tree.openElements.index(furthestBlock) + 1, clone)
        def endTagAppletMarqueeObject(self, token):
            if self.tree.elementInScope(token["name"]):
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if self.tree.elementInScope(token["name"]):
                element = self.tree.openElements.pop()
                while element.name != token["name"]:
                    element = self.tree.openElements.pop()
                # Clears back to the marker pushed on the start tag.
                self.tree.clearActiveFormattingElements()
        def endTagBr(self, token):
            # </br> is treated as <br>.
            self.parser.parseError("unexpected-end-tag-treated-as",
                                   {"originalName": "br", "newName": "br element"})
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(impliedTagToken("br", "StartTag"))
            self.tree.openElements.pop()
        def endTagOther(self, token):
            # "Any other end tag": pop up to a matching element, but stop
            # (and ignore the tag) at any special element.
            for node in self.tree.openElements[::-1]:
                if node.name == token["name"]:
                    self.tree.generateImpliedEndTags(exclude=token["name"])
                    if self.tree.openElements[-1].name != token["name"]:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                    while self.tree.openElements.pop() != node:
                        pass
                    break
                else:
                    if node.nameTuple in specialElements:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                        break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def clearStackToTableContext(self):
while self.tree.openElements[-1].name not in ("table", "html"):
self.tree.openElements.pop()
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
    """Report and drop an end tag that may not close anything here."""
    tag_name = token["name"]
    self.parser.parseError("unexpected-end-tag", {"name": tag_name})
def endTagOther(self, token):
    """Any other end tag in a table is foster-parented ("table voodoo")."""
    self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
    # Do the table magic!
    # Temporarily enable foster parenting around the "in body" handling.
    self.tree.insertFromTable = True
    self.parser.phases["inBody"].processEndTag(token)
    self.tree.insertFromTable = False
class InTableTextPhase(Phase):
    """Buffers runs of character tokens seen directly inside a table.

    Characters are accumulated until a non-character token arrives.  The
    buffer is then flushed: all-whitespace runs are inserted normally,
    while runs containing non-whitespace go through the "in table" phase
    (which foster-parents them).  Afterwards control returns to
    ``originalPhase``.
    """

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        # Phase to resume once the run of character tokens ends.
        self.originalPhase = None
        # Pending character tokens collected so far.
        self.characterTokens = []

    def flushCharacters(self):
        """Emit the buffered character data and reset the buffer."""
        data = "".join([item["data"] for item in self.characterTokens])
        if any([item not in spaceCharacters for item in data]):
            # Non-whitespace text in a table is a parse error handled by
            # the "in table" phase (foster parenting).
            token = {"type": tokenTypes["Characters"], "data": data}
            self.parser.phases["inTable"].insertText(token)
        elif data:
            self.tree.insertText(data)
        self.characterTokens = []

    def processComment(self, token):
        # Any non-character token ends the run: flush, restore the
        # original phase, and let it reprocess the token.
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        return token

    def processEOF(self):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        # Returning True makes the main loop reprocess EOF in the
        # restored phase.
        return True

    def processCharacters(self, token):
        if token["data"] == "\u0000":
            # NUL characters are dropped entirely.
            return
        self.characterTokens.append(token)

    def processSpaceCharacters(self, token):
        # pretty sure we should never reach here
        self.characterTokens.append(token)
        # assert False

    def processStartTag(self, token):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        return token

    def processEndTag(self, token):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        return token
class InCaptionPhase(Phase):
    """The "in caption" insertion mode (content of <caption>).

    Most content is delegated to the "in body" rules; table-structure
    tags implicitly close the caption first.
    """
    # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
              "thead", "tr"), self.startTagTableElement)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            ("caption", self.endTagCaption),
            ("table", self.endTagTable),
            (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
              "thead", "tr"), self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther

    def ignoreEndTagCaption(self):
        # True when </caption> would be ignored (no caption in table scope).
        return not self.tree.elementInScope("caption", variant="table")

    def processEOF(self):
        self.parser.phases["inBody"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inBody"].processCharacters(token)

    def startTagTableElement(self, token):
        """Table-structure start tags imply </caption>, then reprocess."""
        self.parser.parseError()
        # XXX Have to duplicate logic here to find out if the tag is ignored
        ignoreEndTag = self.ignoreEndTagCaption()
        self.parser.phase.processEndTag(impliedTagToken("caption"))
        if not ignoreEndTag:
            return token

    def startTagOther(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def endTagCaption(self, token):
        if not self.ignoreEndTagCaption():
            # AT this code is quite similar to endTagTable in "InTable"
            self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != "caption":
                self.parser.parseError("expected-one-end-tag-but-got-another",
                                       {"gotName": "caption",
                                        "expectedName": self.tree.openElements[-1].name})
            # Pop up to and including the <caption> element.
            while self.tree.openElements[-1].name != "caption":
                self.tree.openElements.pop()
            self.tree.openElements.pop()
            self.tree.clearActiveFormattingElements()
            self.parser.phase = self.parser.phases["inTable"]
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagTable(self, token):
        """</table> implies </caption> first, then reprocess </table>."""
        self.parser.parseError()
        ignoreEndTag = self.ignoreEndTagCaption()
        self.parser.phase.processEndTag(impliedTagToken("caption"))
        if not ignoreEndTag:
            return token

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def endTagOther(self, token):
        return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
    """The "in column group" insertion mode (content of <colgroup>).

    Only <col> elements belong here; nearly everything else implicitly
    closes the colgroup and is reprocessed.
    """
    # http://www.whatwg.org/specs/web-apps/current-work/#in-column

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("col", self.startTagCol)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            ("colgroup", self.endTagColgroup),
            ("col", self.endTagCol)
        ])
        self.endTagHandler.default = self.endTagOther

    def ignoreEndTagColgroup(self):
        # In the fragment case <html> is the current node and the implied
        # </colgroup> would be ignored.
        return self.tree.openElements[-1].name == "html"

    def processEOF(self):
        if self.tree.openElements[-1].name == "html":
            assert self.parser.innerHTML
            return
        else:
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            # True tells the main loop to reprocess EOF in the new phase.
            if not ignoreEndTag:
                return True

    def processCharacters(self, token):
        ignoreEndTag = self.ignoreEndTagColgroup()
        self.endTagColgroup(impliedTagToken("colgroup"))
        if not ignoreEndTag:
            return token

    def startTagCol(self, token):
        # <col> is a void element: insert and immediately pop.
        self.tree.insertElement(token)
        self.tree.openElements.pop()
        token["selfClosingAcknowledged"] = True

    def startTagOther(self, token):
        ignoreEndTag = self.ignoreEndTagColgroup()
        self.endTagColgroup(impliedTagToken("colgroup"))
        if not ignoreEndTag:
            return token

    def endTagColgroup(self, token):
        if self.ignoreEndTagColgroup():
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()
        else:
            self.tree.openElements.pop()
            self.parser.phase = self.parser.phases["inTable"]

    def endTagCol(self, token):
        # </col> never matches anything (<col> is void).
        self.parser.parseError("no-end-tag", {"name": "col"})

    def endTagOther(self, token):
        ignoreEndTag = self.ignoreEndTagColgroup()
        self.endTagColgroup(impliedTagToken("colgroup"))
        if not ignoreEndTag:
            return token
class InTableBodyPhase(Phase):
    """The "in table body" insertion mode (<tbody>/<thead>/<tfoot>)."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("tr", self.startTagTr),
            (("td", "th"), self.startTagTableCell),
            (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
             self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
            ("table", self.endTagTable),
            (("body", "caption", "col", "colgroup", "html", "td", "th",
              "tr"), self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper methods
    def clearStackToTableBodyContext(self):
        """Pop until a row-group element (or <html>) is the current node."""
        while self.tree.openElements[-1].name not in ("tbody", "tfoot",
                                                      "thead", "html"):
            # self.parser.parseError("unexpected-implied-end-tag-in-table",
            #  {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
        if self.tree.openElements[-1].name == "html":
            assert self.parser.innerHTML

    # the rest
    def processEOF(self):
        self.parser.phases["inTable"].processEOF()

    def processSpaceCharacters(self, token):
        return self.parser.phases["inTable"].processSpaceCharacters(token)

    def processCharacters(self, token):
        return self.parser.phases["inTable"].processCharacters(token)

    def startTagTr(self, token):
        self.clearStackToTableBodyContext()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inRow"]

    def startTagTableCell(self, token):
        """<td>/<th> without a row implies <tr>, then reprocess."""
        self.parser.parseError("unexpected-cell-in-table-body",
                               {"name": token["name"]})
        self.startTagTr(impliedTagToken("tr", "StartTag"))
        return token

    def startTagTableOther(self, token):
        # XXX AT Any ideas on how to share this with endTagTable?
        if (self.tree.elementInScope("tbody", variant="table") or
            self.tree.elementInScope("thead", variant="table") or
                self.tree.elementInScope("tfoot", variant="table")):
            # Close the current row group, then reprocess the token.
            self.clearStackToTableBodyContext()
            self.endTagTableRowGroup(
                impliedTagToken(self.tree.openElements[-1].name))
            return token
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def startTagOther(self, token):
        return self.parser.phases["inTable"].processStartTag(token)

    def endTagTableRowGroup(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            self.clearStackToTableBodyContext()
            self.tree.openElements.pop()
            self.parser.phase = self.parser.phases["inTable"]
        else:
            self.parser.parseError("unexpected-end-tag-in-table-body",
                                   {"name": token["name"]})

    def endTagTable(self, token):
        """</table> closes the open row group first, then is reprocessed."""
        if (self.tree.elementInScope("tbody", variant="table") or
            self.tree.elementInScope("thead", variant="table") or
                self.tree.elementInScope("tfoot", variant="table")):
            self.clearStackToTableBodyContext()
            self.endTagTableRowGroup(
                impliedTagToken(self.tree.openElements[-1].name))
            return token
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag-in-table-body",
                               {"name": token["name"]})

    def endTagOther(self, token):
        return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
    """The "in row" insertion mode (content of <tr>)."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-row
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("td", "th"), self.startTagTableCell),
            (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
              "tr"), self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            ("tr", self.endTagTr),
            ("table", self.endTagTable),
            (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
            (("body", "caption", "col", "colgroup", "html", "td", "th"),
             self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper methods (XXX unify this with other table helper methods)
    def clearStackToTableRowContext(self):
        """Pop (with parse errors) until <tr> or <html> is current."""
        while self.tree.openElements[-1].name not in ("tr", "html"):
            self.parser.parseError("unexpected-implied-end-tag-in-table-row",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()

    def ignoreEndTagTr(self):
        # True when </tr> would be ignored (no <tr> in table scope).
        return not self.tree.elementInScope("tr", variant="table")

    # the rest
    def processEOF(self):
        self.parser.phases["inTable"].processEOF()

    def processSpaceCharacters(self, token):
        return self.parser.phases["inTable"].processSpaceCharacters(token)

    def processCharacters(self, token):
        return self.parser.phases["inTable"].processCharacters(token)

    def startTagTableCell(self, token):
        self.clearStackToTableRowContext()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inCell"]
        # Marker bounds the cell's active formatting elements.
        self.tree.activeFormattingElements.append(Marker)

    def startTagTableOther(self, token):
        ignoreEndTag = self.ignoreEndTagTr()
        self.endTagTr(impliedTagToken("tr"))
        # XXX how are we sure it's always ignored in the innerHTML case?
        if not ignoreEndTag:
            return token

    def startTagOther(self, token):
        return self.parser.phases["inTable"].processStartTag(token)

    def endTagTr(self, token):
        if not self.ignoreEndTagTr():
            self.clearStackToTableRowContext()
            self.tree.openElements.pop()
            self.parser.phase = self.parser.phases["inTableBody"]
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagTable(self, token):
        """</table> implies </tr> first, then is reprocessed."""
        ignoreEndTag = self.ignoreEndTagTr()
        self.endTagTr(impliedTagToken("tr"))
        # Reprocess the </table> token unless the implied </tr> was ignored.
        if not ignoreEndTag:
            return token

    def endTagTableRowGroup(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagTr(impliedTagToken("tr"))
            return token
        else:
            self.parser.parseError()

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag-in-table-row",
                               {"name": token["name"]})

    def endTagOther(self, token):
        return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
    """The "in cell" insertion mode (content of <td>/<th>)."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
              "thead", "tr"), self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            (("td", "th"), self.endTagTableCell),
            (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
            (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper
    def closeCell(self):
        """Close the open <td> or <th>, whichever is in table scope."""
        if self.tree.elementInScope("td", variant="table"):
            self.endTagTableCell(impliedTagToken("td"))
        elif self.tree.elementInScope("th", variant="table"):
            self.endTagTableCell(impliedTagToken("th"))

    # the rest
    def processEOF(self):
        self.parser.phases["inBody"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inBody"].processCharacters(token)

    def startTagTableOther(self, token):
        """Table-structure tags implicitly close the cell, then reprocess."""
        if (self.tree.elementInScope("td", variant="table") or
                self.tree.elementInScope("th", variant="table")):
            self.closeCell()
            return token
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def startTagOther(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def endTagTableCell(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            self.tree.generateImpliedEndTags(token["name"])
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("unexpected-cell-end-tag",
                                       {"name": token["name"]})
                # Pop until the matching cell element is removed.
                while True:
                    node = self.tree.openElements.pop()
                    if node.name == token["name"]:
                        break
            else:
                self.tree.openElements.pop()
            self.tree.clearActiveFormattingElements()
            self.parser.phase = self.parser.phases["inRow"]
        else:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def endTagImply(self, token):
        """</table>/</tr>/row-group end tags close the cell, then reprocess."""
        if self.tree.elementInScope(token["name"], variant="table"):
            self.closeCell()
            return token
        else:
            # sometimes innerHTML case
            self.parser.parseError()

    def endTagOther(self, token):
        return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
    """The "in select" insertion mode (content of <select>)."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("option", self.startTagOption),
            ("optgroup", self.startTagOptgroup),
            ("select", self.startTagSelect),
            (("input", "keygen", "textarea"), self.startTagInput),
            ("script", self.startTagScript)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            ("option", self.endTagOption),
            ("optgroup", self.endTagOptgroup),
            ("select", self.endTagSelect)
        ])
        self.endTagHandler.default = self.endTagOther

    # http://www.whatwg.org/specs/web-apps/current-work/#in-select
    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-select")
        else:
            assert self.parser.innerHTML

    def processCharacters(self, token):
        if token["data"] == "\u0000":
            # NUL characters are dropped.
            return
        self.tree.insertText(token["data"])

    def startTagOption(self, token):
        # We need to imply </option> if <option> is the current node.
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        self.tree.insertElement(token)

    def startTagOptgroup(self, token):
        # <optgroup> implicitly closes an open <option> and <optgroup>.
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        if self.tree.openElements[-1].name == "optgroup":
            self.tree.openElements.pop()
        self.tree.insertElement(token)

    def startTagSelect(self, token):
        # A nested <select> acts like </select>.
        self.parser.parseError("unexpected-select-in-select")
        self.endTagSelect(impliedTagToken("select"))

    def startTagInput(self, token):
        """<input>/<keygen>/<textarea> force the select closed, reprocess."""
        self.parser.parseError("unexpected-input-in-select")
        if self.tree.elementInScope("select", variant="select"):
            self.endTagSelect(impliedTagToken("select"))
            return token
        else:
            assert self.parser.innerHTML

    def startTagScript(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-select",
                               {"name": token["name"]})

    def endTagOption(self, token):
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        else:
            self.parser.parseError("unexpected-end-tag-in-select",
                                   {"name": "option"})

    def endTagOptgroup(self, token):
        # </optgroup> implicitly closes <option>
        if (self.tree.openElements[-1].name == "option" and
                self.tree.openElements[-2].name == "optgroup"):
            self.tree.openElements.pop()
        # It also closes </optgroup>
        if self.tree.openElements[-1].name == "optgroup":
            self.tree.openElements.pop()
        # But nothing else
        else:
            self.parser.parseError("unexpected-end-tag-in-select",
                                   {"name": "optgroup"})

    def endTagSelect(self, token):
        if self.tree.elementInScope("select", variant="select"):
            # Pop everything up to and including the <select>.
            node = self.tree.openElements.pop()
            while node.name != "select":
                node = self.tree.openElements.pop()
            self.parser.resetInsertionMode()
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-select",
                               {"name": token["name"]})
class InSelectInTablePhase(Phase):
    """The "in select in table" insertion mode.

    Like "in select", except table-structure tags force the <select>
    closed so table parsing can resume.
    """

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.startTagTable)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.endTagTable)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        self.parser.phases["inSelect"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inSelect"].processCharacters(token)

    def startTagTable(self, token):
        # Table-structure start tag: close the select and reprocess.
        self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
        self.endTagOther(impliedTagToken("select"))
        return token

    def startTagOther(self, token):
        return self.parser.phases["inSelect"].processStartTag(token)

    def endTagTable(self, token):
        self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
        # Only close the select (and reprocess) if the named table element
        # is actually in table scope.
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagOther(impliedTagToken("select"))
            return token

    def endTagOther(self, token):
        return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
    """Rules for parsing tokens inside foreign (SVG/MathML) content.

    Handles HTML "breakout" elements that terminate foreign content,
    SVG case-fixing of tag names, and attribute adjustment for both
    namespaces.
    """
    # HTML elements that, when seen in foreign content, pop back out to
    # the nearest HTML integration point.
    breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
                                  "center", "code", "dd", "div", "dl", "dt",
                                  "em", "embed", "h1", "h2", "h3",
                                  "h4", "h5", "h6", "head", "hr", "i", "img",
                                  "li", "listing", "menu", "meta", "nobr",
                                  "ol", "p", "pre", "ruby", "s", "small",
                                  "span", "strong", "strike", "sub", "sup",
                                  "table", "tt", "u", "ul", "var"])

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

    def adjustSVGTagNames(self, token):
        """Restore mixed-case SVG tag names lowercased by the tokenizer."""
        replacements = {"altglyph": "altGlyph",
                        "altglyphdef": "altGlyphDef",
                        "altglyphitem": "altGlyphItem",
                        "animatecolor": "animateColor",
                        "animatemotion": "animateMotion",
                        "animatetransform": "animateTransform",
                        "clippath": "clipPath",
                        "feblend": "feBlend",
                        "fecolormatrix": "feColorMatrix",
                        "fecomponenttransfer": "feComponentTransfer",
                        "fecomposite": "feComposite",
                        "feconvolvematrix": "feConvolveMatrix",
                        "fediffuselighting": "feDiffuseLighting",
                        "fedisplacementmap": "feDisplacementMap",
                        "fedistantlight": "feDistantLight",
                        "feflood": "feFlood",
                        "fefunca": "feFuncA",
                        "fefuncb": "feFuncB",
                        "fefuncg": "feFuncG",
                        "fefuncr": "feFuncR",
                        "fegaussianblur": "feGaussianBlur",
                        "feimage": "feImage",
                        "femerge": "feMerge",
                        "femergenode": "feMergeNode",
                        "femorphology": "feMorphology",
                        "feoffset": "feOffset",
                        "fepointlight": "fePointLight",
                        "fespecularlighting": "feSpecularLighting",
                        "fespotlight": "feSpotLight",
                        "fetile": "feTile",
                        "feturbulence": "feTurbulence",
                        "foreignobject": "foreignObject",
                        "glyphref": "glyphRef",
                        "lineargradient": "linearGradient",
                        "radialgradient": "radialGradient",
                        "textpath": "textPath"}

        if token["name"] in replacements:
            token["name"] = replacements[token["name"]]

    def processCharacters(self, token):
        if token["data"] == "\u0000":
            # NUL is replaced with U+FFFD in foreign content.
            token["data"] = "\uFFFD"
        elif (self.parser.framesetOK and
              any(char not in spaceCharacters for char in token["data"])):
            self.parser.framesetOK = False
        Phase.processCharacters(self, token)

    def processStartTag(self, token):
        currentNode = self.tree.openElements[-1]
        if (token["name"] in self.breakoutElements or
            (token["name"] == "font" and
             set(token["data"].keys()) & set(["color", "face", "size"]))):
            # Breakout: pop foreign elements until an HTML element or an
            # integration point is current, then reprocess the token.
            self.parser.parseError("unexpected-html-element-in-foreign-content",
                                   {"name": token["name"]})
            while (self.tree.openElements[-1].namespace !=
                   self.tree.defaultNamespace and
                   not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
                   not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
                self.tree.openElements.pop()
            return token

        else:
            # Namespace-specific token adjustment before insertion.
            if currentNode.namespace == namespaces["mathml"]:
                self.parser.adjustMathMLAttributes(token)
            elif currentNode.namespace == namespaces["svg"]:
                self.adjustSVGTagNames(token)
                self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = currentNode.namespace
            self.tree.insertElement(token)
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True

    def processEndTag(self, token):
        nodeIndex = len(self.tree.openElements) - 1
        node = self.tree.openElements[-1]
        if node.name.translate(asciiUpper2Lower) != token["name"]:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        # Walk up the stack looking for a case-insensitive name match;
        # fall back to normal HTML handling at the first HTML element.
        while True:
            if node.name.translate(asciiUpper2Lower) == token["name"]:
                # XXX this isn't in the spec but it seems necessary
                if self.parser.phase == self.parser.phases["inTableText"]:
                    self.parser.phase.flushCharacters()
                    self.parser.phase = self.parser.phase.originalPhase
                while self.tree.openElements.pop() != node:
                    assert self.tree.openElements
                new_token = None
                break
            nodeIndex -= 1

            node = self.tree.openElements[nodeIndex]
            if node.namespace != self.tree.defaultNamespace:
                continue
            else:
                new_token = self.parser.phase.processEndTag(token)
                break
        return new_token
class AfterBodyPhase(Phase):
    """The "after body" insertion mode: only comments and </html> are
    expected; anything else reverts to "in body"."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing.
        pass

    def processComment(self, token):
        # Comments here attach to the root <html> element, not the body.
        self.tree.insertComment(token, self.tree.openElements[0])

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-after-body")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def endTagHtml(self, name):
        # NOTE(review): the parameter receives the end-tag token (the
        # dispatcher passes it positionally); the name `name` is historical.
        if self.parser.innerHTML:
            self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
        else:
            self.parser.phase = self.parser.phases["afterAfterBody"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class InFramesetPhase(Phase):
    """The "in frameset" insertion mode (content of <frameset>)."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("frameset", self.startTagFrameset),
            ("frame", self.startTagFrame),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            ("frameset", self.endTagFrameset)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-frameset")
        else:
            assert self.parser.innerHTML

    def processCharacters(self, token):
        # Character data is not allowed in a frameset.
        self.parser.parseError("unexpected-char-in-frameset")

    def startTagFrameset(self, token):
        self.tree.insertElement(token)

    def startTagFrame(self, token):
        # <frame> is a void element: insert and immediately pop.
        self.tree.insertElement(token)
        self.tree.openElements.pop()

    def startTagNoframes(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-frameset",
                               {"name": token["name"]})

    def endTagFrameset(self, token):
        if self.tree.openElements[-1].name == "html":
            # innerHTML case
            self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
        else:
            self.tree.openElements.pop()
        if (not self.parser.innerHTML and
                self.tree.openElements[-1].name != "frameset"):
            # If we're not parsing a fragment and the current node is no
            # longer a "frameset" element (anymore) then switch.
            self.parser.phase = self.parser.phases["afterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-frameset",
                               {"name": token["name"]})
class AfterFramesetPhase(Phase):
    """The "after frameset" insertion mode (after </frameset>)."""
    # http://www.whatwg.org/specs/web-apps/current-work/#after3
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = _utils.MethodDispatcher([
            ("html", self.endTagHtml)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-after-frameset")

    def startTagNoframes(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-frameset",
                               {"name": token["name"]})

    def endTagHtml(self, token):
        self.parser.phase = self.parser.phases["afterAfterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-frameset",
                               {"name": token["name"]})
class AfterAfterBodyPhase(Phase):
    """The "after after body" insertion mode (after </html>).

    Only comments and whitespace are expected; anything else is an error
    and reverts to "in body".
    """

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments here become children of the Document itself.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class AfterAfterFramesetPhase(Phase):
    """The "after after frameset" insertion mode.

    Unlike "after after body", unexpected tokens are reported but the
    phase does not change.
    """

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoFrames)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments attach to the Document node.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagNoFrames(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
# pylint:enable=unused-argument
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
"inHeadNoscript": InHeadNoscriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def adjust_attributes(token, replacements):
    """Rename the keys of ``token['data']`` according to *replacements*.

    Keys absent from *replacements* are kept unchanged; the original
    attribute order is preserved.  The token is only rebuilt when at
    least one key actually needs renaming.
    """
    if PY3 or _utils.PY27:
        # Dict views support fast set intersection on py2.7+/py3.
        needs_adjustment = viewkeys(token['data']) & viewkeys(replacements)
    else:
        needs_adjustment = frozenset(token['data']) & frozenset(replacements)
    if needs_adjustment:
        adjusted = OrderedDict()
        for key, value in token['data'].items():
            adjusted[replacements.get(key, key)] = value
        token['data'] = adjusted
def impliedTagToken(name, type="EndTag", attributes=None,
                    selfClosing=False):
    """Build a synthetic tokenizer token for an implied tag.

    :param name: tag name, e.g. ``"table"``
    :param type: token type key into ``tokenTypes`` (default ``"EndTag"``)
    :param attributes: attribute dict; a fresh empty dict is used when None
    :param selfClosing: whether the tag is marked self-closing
    """
    token = {
        "type": tokenTypes[type],
        "name": name,
        "data": {} if attributes is None else attributes,
        "selfClosing": selfClosing,
    }
    return token
class ParseError(Exception):
    """Error in parsed document (raised only in strict mode)."""
| true
| true
|
f7043ccf5a72a6b1cf26416838a2233e35f68a0c
| 732
|
py
|
Python
|
doc/_ext/rst_roles.py
|
CarsonSlovoka/image-rename
|
6ff64647aa893ee5c23bfd7e8cc452a7a7d32f29
|
[
"BSD-3-Clause"
] | 2
|
2020-07-03T12:56:17.000Z
|
2021-07-07T16:56:12.000Z
|
doc/_ext/rst_roles.py
|
CarsonSlovoka/image-rename
|
6ff64647aa893ee5c23bfd7e8cc452a7a7d32f29
|
[
"BSD-3-Clause"
] | null | null | null |
doc/_ext/rst_roles.py
|
CarsonSlovoka/image-rename
|
6ff64647aa893ee5c23bfd7e8cc452a7a7d32f29
|
[
"BSD-3-Clause"
] | null | null | null |
from docutils.parsers.rst import roles
from docutils import nodes
from docutils.parsers.rst.states import Inliner
import docutils.parsers.rst.roles
def strike_role(role, rawtext, text, lineno, inliner: Inliner, options=None, content=None):
    """Render ``:del:`text``` as an inline node carrying the ``strike`` class.

    USAGE: :del:`your context`

    :param role: the role name actually used in the document
    :param rawtext: the entire markup snippet, e.g. ``:my-strike:`your context```
    :param text: the interpreted text content, e.g. ``your context``
    :param lineno: line number where the interpreted text begins
    :param inliner: the docutils Inliner object that called this role
    :param options: role options directive-style dict (unused here)
    :param content: role content lines (unused here)
    :return: ``(nodes, messages)`` — one inline node, no system messages
    """
    # Fix: mutable default arguments ({} / []) are evaluated once and shared
    # across every call to the role; use None sentinels instead.
    if options is None:
        options = {}
    if content is None:
        content = []
    node = nodes.inline(rawtext, text, classes=['strike'])
    return [node], []
def setup(app):
    """Sphinx extension entry point: register the ``del`` role (strikethrough)."""
    roles.register_canonical_role('del', strike_role)
| 24.4
| 87
| 0.670765
|
from docutils.parsers.rst import roles
from docutils import nodes
from docutils.parsers.rst.states import Inliner
import docutils.parsers.rst.roles
def strike_role(role, rawtext, text, lineno, inliner: Inliner, options={}, content=[]):
    # NOTE(review): comment-stripped duplicate of strike_role above.  The
    # mutable default arguments ({} / []) are shared across calls — harmless
    # here only because they are never mutated.
    node = nodes.inline(rawtext, text, **dict(classes=['strike']))
    return [node], []
def setup(app):
    # Register the 'del' role with docutils (duplicate stripped copy).
    roles.register_canonical_role('del', strike_role)
| true
| true
|
f7043dd4a34f458d08244ea0a3dea781fe8dba49
| 24
|
py
|
Python
|
python_code.py
|
vervainalthor/Coursera-Capstone
|
b6a5e4ec2c62cba0b212709c9d8d8d8ee3f6b12f
|
[
"MIT"
] | null | null | null |
python_code.py
|
vervainalthor/Coursera-Capstone
|
b6a5e4ec2c62cba0b212709c9d8d8d8ee3f6b12f
|
[
"MIT"
] | 1
|
2021-03-31T19:41:58.000Z
|
2021-03-31T19:41:58.000Z
|
python_code.py
|
vervainalthor/Coursera-Capstone
|
b6a5e4ec2c62cba0b212709c9d8d8d8ee3f6b12f
|
[
"MIT"
] | 16
|
2020-04-13T21:15:59.000Z
|
2021-07-11T12:13:57.000Z
|
# Trivial one-line script: prints a greeting.
print("Hello Github!")
| 8
| 22
| 0.666667
|
# Duplicate (comment-stripped dataset field) of the greeting script above.
print("Hello Github!")
| true
| true
|
f7043e99aff18d59102db1415aebe0995652b748
| 681
|
py
|
Python
|
plagiarismChecker.py
|
saurabhkumar29/website
|
41bb1c2850727dcf1a2a8d8664140a6951718ea6
|
[
"CC-BY-3.0"
] | null | null | null |
plagiarismChecker.py
|
saurabhkumar29/website
|
41bb1c2850727dcf1a2a8d8664140a6951718ea6
|
[
"CC-BY-3.0"
] | null | null | null |
plagiarismChecker.py
|
saurabhkumar29/website
|
41bb1c2850727dcf1a2a8d8664140a6951718ea6
|
[
"CC-BY-3.0"
] | null | null | null |
# Author: Khalid - you must have heard the name
# Steps to run ->
#     :~$ python yoyo.py
# Minimal Flask app: serves a two-text form and reports whether the
# similarity score from stringComparison exceeds a fixed 50% threshold.
from flask import Flask
from flask import request
from flask import render_template
import stringComparison

app = Flask(__name__)


@app.route('/')
def my_form():
    # GET: serve the input form with the two text areas.
    return render_template("my-form.html")


@app.route('/', methods=['POST'])
def my_form_post():
    # POST: compare the two submitted texts.  Threshold is hard-coded at 50.
    text1 = request.form['text1']
    text2 = request.form['text2']
    plagiarismPercent = stringComparison.extremelySimplePlagiarismChecker(text1,text2)
    if plagiarismPercent > 50 :
        return "<h1>Plagiarism Detected !</h1>"
    else :
        return "<h1>No Plagiarism Detected !</h1>"


if __name__ == '__main__':
    app.run()
| 24.321429
| 86
| 0.687225
|
# NOTE(review): comment-stripped duplicate of the plagiarism-checker app above.
from flask import Flask
from flask import request
from flask import render_template
import stringComparison

app = Flask(__name__)


@app.route('/')
def my_form():
    return render_template("my-form.html")


@app.route('/', methods=['POST'])
def my_form_post():
    text1 = request.form['text1']
    text2 = request.form['text2']
    plagiarismPercent = stringComparison.extremelySimplePlagiarismChecker(text1,text2)
    if plagiarismPercent > 50 :
        return "<h1>Plagiarism Detected !</h1>"
    else :
        return "<h1>No Plagiarism Detected !</h1>"


if __name__ == '__main__':
    app.run()
| true
| true
|
f7043f749d59fc0929d6d9c0de4e74f7310101fa
| 32,980
|
py
|
Python
|
tests/test_pysnooper.py
|
leozhoujf/Pyasnooper
|
43bde4b8bf730b4782d8897b601a5925f6621f37
|
[
"MIT"
] | null | null | null |
tests/test_pysnooper.py
|
leozhoujf/Pyasnooper
|
43bde4b8bf730b4782d8897b601a5925f6621f37
|
[
"MIT"
] | null | null | null |
tests/test_pysnooper.py
|
leozhoujf/Pyasnooper
|
43bde4b8bf730b4782d8897b601a5925f6621f37
|
[
"MIT"
] | null | null | null |
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
import io
import textwrap
import threading
import types
import sys
from pysnooper.utils import truncate
from python_toolbox import sys_tools, temp_file_tools
import pytest
import pysnooper
from pysnooper.variables import needs_parentheses
from .utils import (assert_output, assert_sample_output, VariableEntry,
CallEntry, LineEntry, ReturnEntry, OpcodeEntry,
ReturnValueEntry, ExceptionEntry)
def test_string_io():
string_io = io.StringIO()
@pysnooper.snoop(string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function('baba')
assert result == 15
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_multi_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
my_function('baba')
t1 = threading.Thread(target=my_function, name="test123",args=['bubu'])
t1.start()
t1.join()
t1 = threading.Thread(target=my_function, name="bibi",args=['bibi'])
t1.start()
t1.join()
output = output_capturer.string_io.getvalue()
calls = [line for line in output.split("\n") if "call" in line]
main_thread = calls[0]
assert len(main_thread) == len(calls[1])
assert len(main_thread) == len(calls[2])
main_thread_call_str = main_thread.find("call")
assert main_thread_call_str == calls[1].find("call")
assert main_thread_call_str == calls[2].find("call")
thread_info_regex = '([0-9]+-{name}+[ ]+)'
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bubu'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="test123")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="test123")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bibi'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(name='bibi')),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(name='bibi')),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_callable():
string_io = io.StringIO()
def write(msg):
string_io.write(msg)
@pysnooper.snoop(write)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_watch():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch=(
'foo.x',
'io.__name__',
'len(foo.__dict__["x"] * "abc")',
))
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
VariableEntry('io.__name__', "'io'"),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
VariableEntry('foo.x', '2'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '6'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
VariableEntry('foo.x', '4'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '12'),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
VariableEntry('foo.x', '16'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '48'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_watch_explode():
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
@pysnooper.snoop(watch_explode=('_d', '_point', 'lst + []'))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_point = Foo(x=3, y=4)
lst = [7, 8, 9]
lst.append(10)
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
VariableEntry("_d['c']", "'ignore'"),
LineEntry(),
VariableEntry('_point'),
VariableEntry('_point.x', '3'),
VariableEntry('_point.y', '4'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[0]', '7'),
VariableEntry('(lst + [])[1]', '8'),
VariableEntry('(lst + [])[2]', '9'),
VariableEntry('lst + []'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[3]', '10'),
VariableEntry('lst + []'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_variables_classes():
class WithSlots(object):
__slots__ = ('x', 'y')
def __init__(self):
self.x = 3
self.y = 4
@pysnooper.snoop(watch=(
pysnooper.Keys('_d', exclude='c'),
pysnooper.Attrs('_d'), # doesn't have attributes
pysnooper.Attrs('_s'),
pysnooper.Indices('_lst')[-3:],
))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_s = WithSlots()
_lst = list(range(1000))
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('WithSlots'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
LineEntry(),
VariableEntry('_s'),
VariableEntry('_s.x', '3'),
VariableEntry('_s.y', '4'),
LineEntry(),
VariableEntry('_lst'),
VariableEntry('_lst[997]', '997'),
VariableEntry('_lst[998]', '998'),
VariableEntry('_lst[999]', '999'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_single_watch_no_comma():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch='foo')
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_long_variable():
@pysnooper.snoop()
def my_function():
foo = list(range(1000))
return foo
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
)
)
def test_repr_exception():
class Bad(object):
def __repr__(self):
1 / 0
@pysnooper.snoop()
def my_function():
bad = Bad()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Bad'),
CallEntry('def my_function():'),
LineEntry('bad = Bad()'),
VariableEntry('bad', value='REPR FAILED'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
@pysnooper.snoop(string_io, depth=3)
def f1(x1):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
CallEntry('def f1(x1):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_method_and_prefix():
class Baz(object):
def __init__(self):
self.x = 2
@pysnooper.snoop(watch=('self.x',), prefix='ZZZ')
def square(self):
foo = 7
self.x **= 2
return self
baz = Baz()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = baz.square()
assert result is baz
assert result.x == 4
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('self', prefix='ZZZ'),
VariableEntry('self.x', '2', prefix='ZZZ'),
CallEntry('def square(self):', prefix='ZZZ'),
LineEntry('foo = 7', prefix='ZZZ'),
VariableEntry('foo', '7', prefix='ZZZ'),
LineEntry('self.x **= 2', prefix='ZZZ'),
VariableEntry('self.x', '4', prefix='ZZZ'),
LineEntry(prefix='ZZZ'),
ReturnEntry(prefix='ZZZ'),
ReturnValueEntry(prefix='ZZZ'),
),
prefix='ZZZ'
)
def test_file_output():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
@pysnooper.snoop(path)
def my_function(_foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert_output(
output,
(
VariableEntry('_foo', value_regex="u?'baba'"),
CallEntry('def my_function(_foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_confusing_decorator_lines():
string_io = io.StringIO()
def empty_decorator(function):
return function
@empty_decorator
@pysnooper.snoop(string_io,
depth=2) # Multi-line decorator for extra confusion!
@empty_decorator
@empty_decorator
def my_function(foo):
x = lambda bar: 7
y = 8
return y + x(foo)
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
LineEntry(),
# inside lambda
VariableEntry('bar', value_regex="u?'baba'"),
CallEntry('x = lambda bar: 7'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
# back in my_function
ReturnEntry(),
ReturnValueEntry('15'),
)
)
def test_lambda():
string_io = io.StringIO()
my_function = pysnooper.snoop(string_io)(lambda x: x ** 2)
result = my_function(7)
assert result == 49
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x', '7'),
CallEntry(source_regex='^my_function = pysnooper.*'),
LineEntry(source_regex='^my_function = pysnooper.*'),
ReturnEntry(source_regex='^my_function = pysnooper.*'),
ReturnValueEntry('49'),
)
)
def test_unavailable_source():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder, \
sys_tools.TempSysPathAdder(str(folder)):
module_name = 'iaerojajsijf'
python_file_path = folder / ('%s.py' % (module_name,))
content = textwrap.dedent(u'''
import pysnooper
@pysnooper.snoop()
def f(x):
return x
''')
with python_file_path.open('w') as python_file:
python_file.write(content)
module = __import__(module_name)
python_file_path.unlink()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = getattr(module, 'f')(7)
assert result == 7
output = output_capturer.output
assert_output(
output,
(
VariableEntry(stage='starting'),
CallEntry('SOURCE IS UNAVAILABLE'),
LineEntry('SOURCE IS UNAVAILABLE'),
ReturnEntry('SOURCE IS UNAVAILABLE'),
ReturnValueEntry('7'),
)
)
def test_no_overwrite_by_default():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path))
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert output.startswith('lala')
shortened_output = output[4:]
assert_output(
shortened_output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_overwrite():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path), overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert 'lala' not in output
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_error_in_overwrite_argument():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
with pytest.raises(Exception, match='can only be used when writing'):
@pysnooper.snoop(overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
def test_needs_parentheses():
assert not needs_parentheses('x')
assert not needs_parentheses('x.y')
assert not needs_parentheses('x.y.z')
assert not needs_parentheses('x.y.z[0]')
assert not needs_parentheses('x.y.z[0]()')
assert not needs_parentheses('x.y.z[0]()(3, 4 * 5)')
assert not needs_parentheses('foo(x)')
assert not needs_parentheses('foo(x+y)')
assert not needs_parentheses('(x+y)')
assert not needs_parentheses('[x+1 for x in ()]')
assert needs_parentheses('x + y')
assert needs_parentheses('x * y')
assert needs_parentheses('x and y')
assert needs_parentheses('x if z else y')
def test_with_block():
# Testing that a single Tracer can handle many mixed uses
snoop = pysnooper.snoop()
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
return 9 # not traced, mustn't show up
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
# In first with
VariableEntry('x', '2'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# In with in recursive call
VariableEntry('x', '1'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# Call to bar1 from if block outside with
VariableEntry('_x', '0'),
VariableEntry('qux'),
CallEntry('def bar1(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '1'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# -- Similar to previous few sections,
# -- but from first call to foo
# In with in first call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '2'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in first call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
),
)
def test_with_block_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
def f1(x1):
str(3)
with pysnooper.snoop(string_io, depth=3):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(x1)'),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_cellvars():
string_io = io.StringIO()
def f2(a):
def f3(a):
x = 0
x += 1
def f4(a):
y = x
return 42
return f4(a)
return f3(a)
def f1(a):
with pysnooper.snoop(string_io, depth=4):
result1 = f2(a)
return result1
result = f1(42)
assert result == 42
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(a)'),
VariableEntry(),
CallEntry('def f2(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry("a"),
CallEntry('def f3(a):'),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
VariableEntry("x"),
CallEntry('def f4(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_var_order():
string_io = io.StringIO()
def f(one, two, three, four):
five = None
six = None
seven = None
five, six, seven = 5, 6, 7
with pysnooper.snoop(string_io, depth=2):
result = f(1, 2, 3, 4)
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
LineEntry('result = f(1, 2, 3, 4)'),
VariableEntry("one", "1"),
VariableEntry("two", "2"),
VariableEntry("three", "3"),
VariableEntry("four", "4"),
CallEntry('def f(one, two, three, four):'),
LineEntry(),
VariableEntry("five"),
LineEntry(),
VariableEntry("six"),
LineEntry(),
VariableEntry("seven"),
LineEntry(),
VariableEntry("five", "5"),
VariableEntry("six", "6"),
VariableEntry("seven", "7"),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_truncate():
max_length = 20
for i in range(max_length * 2):
string = i * 'a'
truncated = truncate(string, max_length)
if len(string) <= max_length:
assert string == truncated
else:
assert truncated == 'aaaaaaaa...aaaaaaaaa'
assert len(truncated) == max_length
def test_indentation():
from .samples import indentation, recursion
assert_sample_output(indentation)
assert_sample_output(recursion)
def test_exception():
from .samples import exception
assert_sample_output(exception)
def test_generator():
string_io = io.StringIO()
original_tracer = sys.gettrace()
original_tracer_active = lambda: (sys.gettrace() is original_tracer)
@pysnooper.snoop(string_io)
def f(x1):
assert not original_tracer_active()
x2 = (yield x1)
assert not original_tracer_active()
x3 = 'foo'
assert not original_tracer_active()
x4 = (yield 2)
assert not original_tracer_active()
return
assert original_tracer_active()
generator = f(0)
assert original_tracer_active()
first_item = next(generator)
assert original_tracer_active()
assert first_item == 0
second_item = generator.send('blabla')
assert original_tracer_active()
assert second_item == 2
with pytest.raises(StopIteration) as exc_info:
generator.send('looloo')
assert original_tracer_active()
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x1', '0'),
VariableEntry(),
CallEntry(),
LineEntry(),
VariableEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('0'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x2', "'blabla'"),
LineEntry(),
LineEntry(),
VariableEntry('x3', "'foo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('2'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x4', "'looloo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(None),
)
)
def test_custom_repr():
string_io = io.StringIO()
def large(l):
return isinstance(l, list) and len(l) > 5
def print_list_size(l):
return 'list(size={})'.format(len(l))
def print_dict(d):
return 'dict(keys={})'.format(sorted(list(d.keys())))
def evil_condition(x):
return large(x) or isinstance(x, dict)
@pysnooper.snoop(string_io, custom_repr=(
(large, print_list_size),
(dict, print_dict),
(evil_condition, lambda x: 'I am evil')))
def sum_to_x(x):
l = list(range(x))
a = {'1': 1, '2': 2}
return sum(l)
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'list(size=10000)'),
LineEntry(),
VariableEntry('a', "dict(keys=['1', '2'])"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('49995000'),
)
)
| 28.068085
| 79
| 0.503062
|
import io
import textwrap
import threading
import types
import sys
from pysnooper.utils import truncate
from python_toolbox import sys_tools, temp_file_tools
import pytest
import pysnooper
from pysnooper.variables import needs_parentheses
from .utils import (assert_output, assert_sample_output, VariableEntry,
CallEntry, LineEntry, ReturnEntry, OpcodeEntry,
ReturnValueEntry, ExceptionEntry)
def test_string_io():
string_io = io.StringIO()
@pysnooper.snoop(string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function('baba')
assert result == 15
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_multi_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
my_function('baba')
t1 = threading.Thread(target=my_function, name="test123",args=['bubu'])
t1.start()
t1.join()
t1 = threading.Thread(target=my_function, name="bibi",args=['bibi'])
t1.start()
t1.join()
output = output_capturer.string_io.getvalue()
calls = [line for line in output.split("\n") if "call" in line]
main_thread = calls[0]
assert len(main_thread) == len(calls[1])
assert len(main_thread) == len(calls[2])
main_thread_call_str = main_thread.find("call")
assert main_thread_call_str == calls[1].find("call")
assert main_thread_call_str == calls[2].find("call")
thread_info_regex = '([0-9]+-{name}+[ ]+)'
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bubu'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="test123")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="test123")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bibi'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(name='bibi')),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(name='bibi')),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_callable():
string_io = io.StringIO()
def write(msg):
string_io.write(msg)
@pysnooper.snoop(write)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_watch():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch=(
'foo.x',
'io.__name__',
'len(foo.__dict__["x"] * "abc")',
))
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
VariableEntry('io.__name__', "'io'"),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
VariableEntry('foo.x', '2'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '6'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
VariableEntry('foo.x', '4'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '12'),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
VariableEntry('foo.x', '16'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '48'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_watch_explode():
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
@pysnooper.snoop(watch_explode=('_d', '_point', 'lst + []'))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_point = Foo(x=3, y=4)
lst = [7, 8, 9]
lst.append(10)
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
VariableEntry("_d['c']", "'ignore'"),
LineEntry(),
VariableEntry('_point'),
VariableEntry('_point.x', '3'),
VariableEntry('_point.y', '4'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[0]', '7'),
VariableEntry('(lst + [])[1]', '8'),
VariableEntry('(lst + [])[2]', '9'),
VariableEntry('lst + []'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[3]', '10'),
VariableEntry('lst + []'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_variables_classes():
class WithSlots(object):
__slots__ = ('x', 'y')
def __init__(self):
self.x = 3
self.y = 4
@pysnooper.snoop(watch=(
pysnooper.Keys('_d', exclude='c'),
pysnooper.Attrs('_d'), pysnooper.Attrs('_s'),
pysnooper.Indices('_lst')[-3:],
))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_s = WithSlots()
_lst = list(range(1000))
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('WithSlots'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
LineEntry(),
VariableEntry('_s'),
VariableEntry('_s.x', '3'),
VariableEntry('_s.y', '4'),
LineEntry(),
VariableEntry('_lst'),
VariableEntry('_lst[997]', '997'),
VariableEntry('_lst[998]', '998'),
VariableEntry('_lst[999]', '999'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_single_watch_no_comma():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch='foo')
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_long_variable():
@pysnooper.snoop()
def my_function():
foo = list(range(1000))
return foo
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
)
)
def test_repr_exception():
class Bad(object):
def __repr__(self):
1 / 0
@pysnooper.snoop()
def my_function():
bad = Bad()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Bad'),
CallEntry('def my_function():'),
LineEntry('bad = Bad()'),
VariableEntry('bad', value='REPR FAILED'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
@pysnooper.snoop(string_io, depth=3)
def f1(x1):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
CallEntry('def f1(x1):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_method_and_prefix():
class Baz(object):
def __init__(self):
self.x = 2
@pysnooper.snoop(watch=('self.x',), prefix='ZZZ')
def square(self):
foo = 7
self.x **= 2
return self
baz = Baz()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = baz.square()
assert result is baz
assert result.x == 4
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('self', prefix='ZZZ'),
VariableEntry('self.x', '2', prefix='ZZZ'),
CallEntry('def square(self):', prefix='ZZZ'),
LineEntry('foo = 7', prefix='ZZZ'),
VariableEntry('foo', '7', prefix='ZZZ'),
LineEntry('self.x **= 2', prefix='ZZZ'),
VariableEntry('self.x', '4', prefix='ZZZ'),
LineEntry(prefix='ZZZ'),
ReturnEntry(prefix='ZZZ'),
ReturnValueEntry(prefix='ZZZ'),
),
prefix='ZZZ'
)
def test_file_output():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
@pysnooper.snoop(path)
def my_function(_foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert_output(
output,
(
VariableEntry('_foo', value_regex="u?'baba'"),
CallEntry('def my_function(_foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_confusing_decorator_lines():
string_io = io.StringIO()
def empty_decorator(function):
return function
@empty_decorator
@pysnooper.snoop(string_io,
depth=2) # Multi-line decorator for extra confusion!
@empty_decorator
@empty_decorator
def my_function(foo):
x = lambda bar: 7
y = 8
return y + x(foo)
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
LineEntry(),
# inside lambda
VariableEntry('bar', value_regex="u?'baba'"),
CallEntry('x = lambda bar: 7'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
# back in my_function
ReturnEntry(),
ReturnValueEntry('15'),
)
)
def test_lambda():
string_io = io.StringIO()
my_function = pysnooper.snoop(string_io)(lambda x: x ** 2)
result = my_function(7)
assert result == 49
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x', '7'),
CallEntry(source_regex='^my_function = pysnooper.*'),
LineEntry(source_regex='^my_function = pysnooper.*'),
ReturnEntry(source_regex='^my_function = pysnooper.*'),
ReturnValueEntry('49'),
)
)
def test_unavailable_source():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder, \
sys_tools.TempSysPathAdder(str(folder)):
module_name = 'iaerojajsijf'
python_file_path = folder / ('%s.py' % (module_name,))
content = textwrap.dedent(u'''
import pysnooper
@pysnooper.snoop()
def f(x):
return x
''')
with python_file_path.open('w') as python_file:
python_file.write(content)
module = __import__(module_name)
python_file_path.unlink()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = getattr(module, 'f')(7)
assert result == 7
output = output_capturer.output
assert_output(
output,
(
VariableEntry(stage='starting'),
CallEntry('SOURCE IS UNAVAILABLE'),
LineEntry('SOURCE IS UNAVAILABLE'),
ReturnEntry('SOURCE IS UNAVAILABLE'),
ReturnValueEntry('7'),
)
)
def test_no_overwrite_by_default():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path))
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert output.startswith('lala')
shortened_output = output[4:]
assert_output(
shortened_output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_overwrite():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path), overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert 'lala' not in output
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_error_in_overwrite_argument():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
with pytest.raises(Exception, match='can only be used when writing'):
@pysnooper.snoop(overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
def test_needs_parentheses():
assert not needs_parentheses('x')
assert not needs_parentheses('x.y')
assert not needs_parentheses('x.y.z')
assert not needs_parentheses('x.y.z[0]')
assert not needs_parentheses('x.y.z[0]()')
assert not needs_parentheses('x.y.z[0]()(3, 4 * 5)')
assert not needs_parentheses('foo(x)')
assert not needs_parentheses('foo(x+y)')
assert not needs_parentheses('(x+y)')
assert not needs_parentheses('[x+1 for x in ()]')
assert needs_parentheses('x + y')
assert needs_parentheses('x * y')
assert needs_parentheses('x and y')
assert needs_parentheses('x if z else y')
def test_with_block():
# Testing that a single Tracer can handle many mixed uses
snoop = pysnooper.snoop()
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
return 9 # not traced, mustn't show up
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('x', '2'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
VariableEntry('x', '1'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
VariableEntry('_x', '0'),
VariableEntry('qux'),
CallEntry('def bar1(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
LineEntry('bar2(x)'),
VariableEntry('_x', '1'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
LineEntry('qux()'),
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
LineEntry('bar2(x)'),
VariableEntry('_x', '2'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
LineEntry('qux()'),
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
),
)
def test_with_block_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
def f1(x1):
str(3)
with pysnooper.snoop(string_io, depth=3):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(x1)'),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_cellvars():
string_io = io.StringIO()
def f2(a):
def f3(a):
x = 0
x += 1
def f4(a):
y = x
return 42
return f4(a)
return f3(a)
def f1(a):
with pysnooper.snoop(string_io, depth=4):
result1 = f2(a)
return result1
result = f1(42)
assert result == 42
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(a)'),
VariableEntry(),
CallEntry('def f2(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry("a"),
CallEntry('def f3(a):'),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
VariableEntry("x"),
CallEntry('def f4(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_var_order():
string_io = io.StringIO()
def f(one, two, three, four):
five = None
six = None
seven = None
five, six, seven = 5, 6, 7
with pysnooper.snoop(string_io, depth=2):
result = f(1, 2, 3, 4)
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
LineEntry('result = f(1, 2, 3, 4)'),
VariableEntry("one", "1"),
VariableEntry("two", "2"),
VariableEntry("three", "3"),
VariableEntry("four", "4"),
CallEntry('def f(one, two, three, four):'),
LineEntry(),
VariableEntry("five"),
LineEntry(),
VariableEntry("six"),
LineEntry(),
VariableEntry("seven"),
LineEntry(),
VariableEntry("five", "5"),
VariableEntry("six", "6"),
VariableEntry("seven", "7"),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_truncate():
max_length = 20
for i in range(max_length * 2):
string = i * 'a'
truncated = truncate(string, max_length)
if len(string) <= max_length:
assert string == truncated
else:
assert truncated == 'aaaaaaaa...aaaaaaaaa'
assert len(truncated) == max_length
def test_indentation():
from .samples import indentation, recursion
assert_sample_output(indentation)
assert_sample_output(recursion)
def test_exception():
from .samples import exception
assert_sample_output(exception)
def test_generator():
string_io = io.StringIO()
original_tracer = sys.gettrace()
original_tracer_active = lambda: (sys.gettrace() is original_tracer)
@pysnooper.snoop(string_io)
def f(x1):
assert not original_tracer_active()
x2 = (yield x1)
assert not original_tracer_active()
x3 = 'foo'
assert not original_tracer_active()
x4 = (yield 2)
assert not original_tracer_active()
return
assert original_tracer_active()
generator = f(0)
assert original_tracer_active()
first_item = next(generator)
assert original_tracer_active()
assert first_item == 0
second_item = generator.send('blabla')
assert original_tracer_active()
assert second_item == 2
with pytest.raises(StopIteration) as exc_info:
generator.send('looloo')
assert original_tracer_active()
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x1', '0'),
VariableEntry(),
CallEntry(),
LineEntry(),
VariableEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('0'),
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x2', "'blabla'"),
LineEntry(),
LineEntry(),
VariableEntry('x3', "'foo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('2'),
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x4', "'looloo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(None),
)
)
def test_custom_repr():
string_io = io.StringIO()
def large(l):
return isinstance(l, list) and len(l) > 5
def print_list_size(l):
return 'list(size={})'.format(len(l))
def print_dict(d):
return 'dict(keys={})'.format(sorted(list(d.keys())))
def evil_condition(x):
return large(x) or isinstance(x, dict)
@pysnooper.snoop(string_io, custom_repr=(
(large, print_list_size),
(dict, print_dict),
(evil_condition, lambda x: 'I am evil')))
def sum_to_x(x):
l = list(range(x))
a = {'1': 1, '2': 2}
return sum(l)
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'list(size=10000)'),
LineEntry(),
VariableEntry('a', "dict(keys=['1', '2'])"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('49995000'),
)
)
| true
| true
|
f7043fdb3cb677c8bc7f76a02ec8ae40c8f1cd3f
| 3,005
|
py
|
Python
|
tests/templates/test_templates.py
|
lhenkelm/cabinetry
|
40120c2718502cd69c8486020de963bde9005989
|
[
"BSD-3-Clause"
] | 13
|
2020-04-30T04:23:06.000Z
|
2021-09-06T20:26:31.000Z
|
tests/templates/test_templates.py
|
alexander-held/pytfc
|
fce72088b4a6345304bf8c2e489938d41087a253
|
[
"BSD-3-Clause"
] | 247
|
2020-05-07T00:26:02.000Z
|
2021-09-17T14:24:43.000Z
|
tests/templates/test_templates.py
|
alexander-held/pytfc
|
fce72088b4a6345304bf8c2e489938d41087a253
|
[
"BSD-3-Clause"
] | 6
|
2020-05-07T00:11:27.000Z
|
2021-03-11T18:26:07.000Z
|
import logging
import pathlib
from unittest import mock
from cabinetry import templates
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.builder._Builder")
def test_build(mock_builder, mock_apply):
config = {"General": {"HistogramFolder": "path/", "InputPath": "file.root"}}
method = "uproot"
# no router
templates.build(config, method=method)
assert mock_builder.call_args_list == [
((pathlib.Path("path/"), "file.root", method), {})
]
assert mock_apply.call_count == 1
config_call, func_call = mock_apply.call_args[0]
assert config_call == config
assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {"match_func": None}
# including a router
mock_router = mock.MagicMock()
templates.build(config, method=method, router=mock_router)
# verify wrapper was set
assert (
mock_router.template_builder_wrapper._extract_mock_name()
== "_Builder()._wrap_custom_template_builder"
)
assert mock_apply.call_count == 2 # 1 from before
config_call, func_call = mock_apply.call_args[0]
assert config_call == config
assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {
"match_func": mock_router._find_template_builder_match
}
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.collector._collector", return_value="func")
def test_collect(mock_collector, mock_apply, caplog):
caplog.set_level(logging.DEBUG)
config = {
"General": {
"HistogramFolder": "path/",
"InputPath": "f.root:{VariationPath}",
"VariationPath": "nominal",
}
}
method = "uproot"
templates.collect(config, method=method)
assert mock_collector.call_args_list == [
((pathlib.Path("path/"), "f.root:{VariationPath}", "nominal", method), {})
]
assert mock_apply.call_args_list == [((config, "func"), {})]
caplog.clear()
# no VariationPath in general settings
config = {
"General": {"HistogramFolder": "path/", "InputPath": "f.root:{VariationPath}"}
}
templates.collect(config, method=method)
assert 'no VariationPath specified in general settings, defaulting to ""' in [
rec.message for rec in caplog.records
]
assert mock_collector.call_args == (
(pathlib.Path("path/"), "f.root:{VariationPath}", "", method),
{},
)
caplog.set_level(logging.DEBUG)
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.postprocessor._postprocessor", return_value="func")
def test_run(mock_postprocessor, mock_apply):
config = {"General": {"HistogramFolder": "path/"}}
templates.postprocess(config)
assert mock_postprocessor.call_args_list == [((pathlib.Path("path/"),), {})]
assert mock_apply.call_args_list == [((config, "func"), {})]
| 34.147727
| 86
| 0.678203
|
import logging
import pathlib
from unittest import mock
from cabinetry import templates
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.builder._Builder")
def test_build(mock_builder, mock_apply):
config = {"General": {"HistogramFolder": "path/", "InputPath": "file.root"}}
method = "uproot"
templates.build(config, method=method)
assert mock_builder.call_args_list == [
((pathlib.Path("path/"), "file.root", method), {})
]
assert mock_apply.call_count == 1
config_call, func_call = mock_apply.call_args[0]
assert config_call == config
assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {"match_func": None}
mock_router = mock.MagicMock()
templates.build(config, method=method, router=mock_router)
assert (
mock_router.template_builder_wrapper._extract_mock_name()
== "_Builder()._wrap_custom_template_builder"
)
assert mock_apply.call_count == 2 config_call, func_call = mock_apply.call_args[0]
assert config_call == config
assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {
"match_func": mock_router._find_template_builder_match
}
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.collector._collector", return_value="func")
def test_collect(mock_collector, mock_apply, caplog):
caplog.set_level(logging.DEBUG)
config = {
"General": {
"HistogramFolder": "path/",
"InputPath": "f.root:{VariationPath}",
"VariationPath": "nominal",
}
}
method = "uproot"
templates.collect(config, method=method)
assert mock_collector.call_args_list == [
((pathlib.Path("path/"), "f.root:{VariationPath}", "nominal", method), {})
]
assert mock_apply.call_args_list == [((config, "func"), {})]
caplog.clear()
config = {
"General": {"HistogramFolder": "path/", "InputPath": "f.root:{VariationPath}"}
}
templates.collect(config, method=method)
assert 'no VariationPath specified in general settings, defaulting to ""' in [
rec.message for rec in caplog.records
]
assert mock_collector.call_args == (
(pathlib.Path("path/"), "f.root:{VariationPath}", "", method),
{},
)
caplog.set_level(logging.DEBUG)
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.postprocessor._postprocessor", return_value="func")
def test_run(mock_postprocessor, mock_apply):
config = {"General": {"HistogramFolder": "path/"}}
templates.postprocess(config)
assert mock_postprocessor.call_args_list == [((pathlib.Path("path/"),), {})]
assert mock_apply.call_args_list == [((config, "func"), {})]
| true
| true
|
f70440dd09341f241e10da230ab174b5743ee8df
| 11,582
|
py
|
Python
|
MecademicRobot/RobotFeedback.py
|
GarrisonJohnston123/meca500_python2_driver
|
a5b9be9362dba3612b902cc5dfee5553d1a895cd
|
[
"MIT"
] | null | null | null |
MecademicRobot/RobotFeedback.py
|
GarrisonJohnston123/meca500_python2_driver
|
a5b9be9362dba3612b902cc5dfee5553d1a895cd
|
[
"MIT"
] | null | null | null |
MecademicRobot/RobotFeedback.py
|
GarrisonJohnston123/meca500_python2_driver
|
a5b9be9362dba3612b902cc5dfee5553d1a895cd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import socket
import re
class RobotFeedback:
"""Class for the Mecademic Robot allowing for live positional
feedback of the Mecademic Robot.
Attributes
----------
address : string
The IP address associated to the Mecademic robot.
socket : socket
Socket connecting to physical Mecademic Robot.
robot_status : tuple of boolean
States status bit of the robot.
gripper_status : tuple of boolean
States status bit of the gripper.
joints : tuple of floats
Joint angle in degrees of each joint starting from
joint 1 going all way to joint 6.
cartesian : tuple of floats
The cartesian values in mm and degrees of the TRF.
joints_vel : floats
Velocity of joints.
torque : tuple of floats
Torque of joints.
accelerometer : tuple of floats
Acceleration of joints.
last_msg_chunk : string
Buffer of received messages.
version : string
Firmware version of the Mecademic Robot.
version_regex : list of int
Version_regex.
"""
def __init__(self, address, firmware_version):
"""Constructor for an instance of the class Mecademic robot.
Parameters
----------
address : string
The IP address associated to the Mecademic robot.
firmware_version : string
Firmware version of the Mecademic Robot.
"""
self.address = address
self.socket = None
self.robot_status = ()
self.gripper_status = ()
self.joints = () #Joint Angles, angles in degrees | [theta_1, theta_2, ... theta_n]
self.cartesian = () #Cartesian coordinates, distances in mm, angles in degrees | [x,y,z,alpha,beta,gamma]
self.joints_vel =()
self.torque =()
self.accelerometer =()
self.last_msg_chunk = ''
a = re.search(r'(\d+)\.(\d+)\.(\d+)', firmware_version)
self.version = a.group(0)
self.version_regex = [int(a.group(1)), int(a.group(2)), int(a.group(3))]
def connect(self):
"""Connects Mecademic Robot object communication to the physical Mecademic Robot.
Returns
-------
status : boolean
Return whether the connection is established.
"""
try:
self.socket = socket.socket()
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,1)
self.socket.settimeout(1) #1s
try:
self.socket.connect((self.address, 10001)) #connect to the robot's address
except socket.timeout: #catch if the robot is not connected to in time
#raise TimeoutError
raise RuntimeError
# Receive confirmation of connection
if self.socket is None: #check that socket is not connected to nothing
raise RuntimeError
self.socket.settimeout(1) #1s
try:
if(self.version_regex[0] <= 7):
self.get_data()
elif(self.version_regex[0] > 7): #RobotStatus and GripperStatus are sent on 10001 upon connecting from 8.x firmware
msg = self.socket.recv(256).decode('ascii') #read message from robot
self._get_robot_status(msg)
self._get_gripper_status(msg)
return True
except socket.timeout:
raise RuntimeError
#except TimeoutError:
#return False
# OTHER USER !!!
except RuntimeError:
return False
def disconnect(self):
"""Disconnects Mecademic Robot object from physical Mecademic Robot.
"""
if self.socket is not None:
self.socket.close()
self.socket = None
def get_data(self, delay=0.1):
"""Receives message from the Mecademic Robot and
saves the values in appropriate variables.
Parameters
----------
delay: int or float
Time to set for timeout of the socket.
"""
if self.socket is None: #check that the connection is established
return #if no connection, nothing to receive
self.socket.settimeout(delay) #set read timeout to desired delay
try:
raw_msg = self.socket.recv(256).decode('ascii') #read message from robot
raw_response = raw_msg.split('\x00') # Split the data at \x00 to manage fragmented data
raw_response[0] = self.last_msg_chunk + raw_response[0] # Merge the first data with last fragment from previous data stream
self.last_msg_chunk = raw_response[-1]
for response in raw_response[:-1]:
if(self.version_regex[0] <= 7):
self._get_joints(response)
self._get_cartesian(response)
elif(self.version_regex[0] > 7):
self._get_joints(response)
self._get_cartesian(response)
self._get_joints_vel(response)
self._get_torque_ratio(response)
self._get_accelerometer(response)
#except TimeoutError:
except RuntimeError:
pass
def _get_robot_status(self, response):
"""Gets the values of RobotStatus bits from the message sent by
the Robot upon connecting.
Values saved to attribute robotstatus of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('RobotStatus')
for resp_code in code:
if response.find(resp_code) != -1:
self.robot_status = self._decode_msg(response, resp_code)
def _get_gripper_status(self, response):
"""Gets the values of GripperStatus bits from the message sent by
the Robot upon connecting.
Values saved to attribute robotstatus of the object.
Parameters
----------
response : string
Message received from the robot.
"""
code = None
code = self._get_response_code('GripperStatus')
for resp_code in code:
if response.find(resp_code) != -1:
self.gripper_status = self._decode_msg(response,resp_code)
def _get_joints(self, response):
"""Gets the joint values of the variables from the message sent by the Robot.
Values saved to attribute joints of the object.
Parameters
----------
response: string
Message received from the Robot.
"""
code = None
code = self._get_response_code('JointsPose')
for resp_code in code:
if response.find(resp_code) != -1:
self.joints = self._decode_msg(response, resp_code)
def _get_cartesian(self, response):
"""Gets the cartesian values of the variables from the message sent by the Robot.
Values saved to attribute cartesian of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('CartesianPose')
for resp_code in code:
if response.find(resp_code) != -1:
self.cartesian = self._decode_msg(response,resp_code)
def _get_joints_vel(self, response):
"""Gets the velocity values of the Joints from the message sent by the Robot.
Values saved to attribute jointsvel of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('JointsVel')
for resp_code in code:
if response.find(resp_code) != -1:
self.joints_vel = self._decode_msg(response,resp_code)
def _get_torque_ratio(self, response):
"""Gets the torque ratio values of the Joints from the message sent by the Robot.
Values saved to attribute torque of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('TorqueRatio')
for resp_code in code:
if response.find(resp_code) != -1:
self.torque = self._decode_msg(response,resp_code)
def _get_accelerometer(self,response):
"""Gets the accelerometers values from the message sent by the Robot.
Values saved to attribute accelerometer of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('AccelerometerData')
for resp_code in code:
if response.find(resp_code) != -1:
self.accelerometer = self._decode_msg(response,resp_code)
def _get_response_code(self, param):
"""Retreives the response code for the parameters being streamed on port 100001.
Parameters
----------
param : string
Parameter that needs to be extracted from raw data strem from Mecademic Robot.
1. Robot Status {sent only once upon connecting on 10001}.
2. Gripper Status {sent only once upon connecting on 10001}.
3. Joints Pose feedback.
4. Cartesian Pose feedback.
5. Joints Velocity feedback.
6. Torque Ratio.
7. Accelerometer data.
Returns
--------
answer_list : list of strings
List of response codes to search for in the raw data stream.
"""
if param.find('RobotStatus') != -1:
return ['[2007]']
elif param.find('GripperStatus')!= -1:
return ['[2079]']
elif param.find('JointsPose') != -1:
if(self.version_regex[0] <= 7):
return ['[2102]']
elif(self.version_regex[0] > 7):
return ['[2026]','[2210]']
elif param.find('CartesianPose') != -1:
if(self.version_regex[0] <= 7):
return ['[2103]']
elif(self.version_regex[0] > 7):
return ['[2027]','[2211]']
elif param.find('JointsVel') != -1:
return ['[2212]']
elif param.find('TorqueRatio') != -1:
return ['[2213]']
elif param.find('AccelerometerData') != -1:
return ['[2220]']
else:
return ['Invalid']
def _decode_msg(self, response, resp_code):
"""
Parameters
----------
response : string
Message received from the Robot.
resp_code : string
Message to decode
Returns
--------
params : tuplt of float
Message decoded.
"""
response = response.replace(resp_code+'[','').replace(']','')
params = ()
if response != '':
param_str = response.split(',')
if len(param_str) == 6:
params = tuple((float(x) for x in param_str))
elif len(param_str) == 7:
params = tuple((float(x) for x in param_str[1:])) # remove timestamp
else:
params =()
return params
| 35.310976
| 135
| 0.567864
|
import socket
import re
class RobotFeedback:
def __init__(self, address, firmware_version):
self.address = address
self.socket = None
self.robot_status = ()
self.gripper_status = ()
self.joints = () self.cartesian = () self.joints_vel =()
self.torque =()
self.accelerometer =()
self.last_msg_chunk = ''
a = re.search(r'(\d+)\.(\d+)\.(\d+)', firmware_version)
self.version = a.group(0)
self.version_regex = [int(a.group(1)), int(a.group(2)), int(a.group(3))]
def connect(self):
try:
self.socket = socket.socket()
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,1)
self.socket.settimeout(1) try:
self.socket.connect((self.address, 10001)) except socket.timeout: #catch if the robot is not connected to in time
#raise TimeoutError
raise RuntimeError
# Receive confirmation of connection
if self.socket is None: #check that socket is not connected to nothing
raise RuntimeError
self.socket.settimeout(1) #1s
try:
if(self.version_regex[0] <= 7):
self.get_data()
elif(self.version_regex[0] > 7): #RobotStatus and GripperStatus are sent on 10001 upon connecting from 8.x firmware
msg = self.socket.recv(256).decode('ascii') #read message from robot
self._get_robot_status(msg)
self._get_gripper_status(msg)
return True
except socket.timeout:
raise RuntimeError
#except TimeoutError:
#return False
# OTHER USER !!!
except RuntimeError:
return False
def disconnect(self):
if self.socket is not None:
self.socket.close()
self.socket = None
def get_data(self, delay=0.1):
if self.socket is None: #check that the connection is established
return #if no connection, nothing to receive
self.socket.settimeout(delay) #set read timeout to desired delay
try:
raw_msg = self.socket.recv(256).decode('ascii') #read message from robot
raw_response = raw_msg.split('\x00') # Split the data at \x00 to manage fragmented data
raw_response[0] = self.last_msg_chunk + raw_response[0] # Merge the first data with last fragment from previous data stream
self.last_msg_chunk = raw_response[-1]
for response in raw_response[:-1]:
if(self.version_regex[0] <= 7):
self._get_joints(response)
self._get_cartesian(response)
elif(self.version_regex[0] > 7):
self._get_joints(response)
self._get_cartesian(response)
self._get_joints_vel(response)
self._get_torque_ratio(response)
self._get_accelerometer(response)
#except TimeoutError:
except RuntimeError:
pass
def _get_robot_status(self, response):
code = None
code = self._get_response_code('RobotStatus')
for resp_code in code:
if response.find(resp_code) != -1:
self.robot_status = self._decode_msg(response, resp_code)
def _get_gripper_status(self, response):
code = None
code = self._get_response_code('GripperStatus')
for resp_code in code:
if response.find(resp_code) != -1:
self.gripper_status = self._decode_msg(response,resp_code)
def _get_joints(self, response):
code = None
code = self._get_response_code('JointsPose')
for resp_code in code:
if response.find(resp_code) != -1:
self.joints = self._decode_msg(response, resp_code)
def _get_cartesian(self, response):
code = None
code = self._get_response_code('CartesianPose')
for resp_code in code:
if response.find(resp_code) != -1:
self.cartesian = self._decode_msg(response,resp_code)
def _get_joints_vel(self, response):
code = None
code = self._get_response_code('JointsVel')
for resp_code in code:
if response.find(resp_code) != -1:
self.joints_vel = self._decode_msg(response,resp_code)
def _get_torque_ratio(self, response):
code = None
code = self._get_response_code('TorqueRatio')
for resp_code in code:
if response.find(resp_code) != -1:
self.torque = self._decode_msg(response,resp_code)
def _get_accelerometer(self,response):
code = None
code = self._get_response_code('AccelerometerData')
for resp_code in code:
if response.find(resp_code) != -1:
self.accelerometer = self._decode_msg(response,resp_code)
def _get_response_code(self, param):
if param.find('RobotStatus') != -1:
return ['[2007]']
elif param.find('GripperStatus')!= -1:
return ['[2079]']
elif param.find('JointsPose') != -1:
if(self.version_regex[0] <= 7):
return ['[2102]']
elif(self.version_regex[0] > 7):
return ['[2026]','[2210]']
elif param.find('CartesianPose') != -1:
if(self.version_regex[0] <= 7):
return ['[2103]']
elif(self.version_regex[0] > 7):
return ['[2027]','[2211]']
elif param.find('JointsVel') != -1:
return ['[2212]']
elif param.find('TorqueRatio') != -1:
return ['[2213]']
elif param.find('AccelerometerData') != -1:
return ['[2220]']
else:
return ['Invalid']
def _decode_msg(self, response, resp_code):
response = response.replace(resp_code+'[','').replace(']','')
params = ()
if response != '':
param_str = response.split(',')
if len(param_str) == 6:
params = tuple((float(x) for x in param_str))
elif len(param_str) == 7:
params = tuple((float(x) for x in param_str[1:])) # remove timestamp
else:
params =()
return params
| true
| true
|
f7044144ec2809f9d7962b59fb909c9753171af5
| 19,423
|
py
|
Python
|
tests/model_inheritance_regress/tests.py
|
indevgr/django
|
0247c9b08f8da4a2d93b9cede6c615011552b55a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2017-01-11T06:27:15.000Z
|
2017-01-11T06:27:15.000Z
|
tests/model_inheritance_regress/tests.py
|
indevgr/django
|
0247c9b08f8da4a2d93b9cede6c615011552b55a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/model_inheritance_regress/tests.py
|
indevgr/django
|
0247c9b08f8da4a2d93b9cede6c615011552b55a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2019-10-22T12:16:53.000Z
|
2019-10-22T12:16:53.000Z
|
"""
Regression tests for Model inheritance behavior.
"""
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from unittest import expectedFailure
from django import forms
from django.test import TestCase
from .models import (
ArticleWithAuthor, BachelorParty, BirthdayParty, BusStation, Child,
DerivedM, InternalCertificationAudit, ItalianRestaurant, M2MChild,
MessyBachelorParty, ParkingLot, ParkingLot2, ParkingLot3, ParkingLot4A,
ParkingLot4B, Person, Place, Profile, QualityControl, Restaurant,
SelfRefChild, SelfRefParent, Senator, Supplier, TrainStation, User,
Wholesaler,
)
class ModelInheritanceTest(TestCase):
def test_model_inheritance(self):
# Regression for #7350, #7202
# Check that when you create a Parent object with a specific reference
# to an existent child instance, saving the Parent doesn't duplicate
# the child. This behavior is only activated during a raw save - it
# is mostly relevant to deserialization, but any sort of CORBA style
# 'narrow()' API would require a similar approach.
# Create a child-parent-grandparent chain
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
# Create a child-parent chain with an explicit parent link
place2 = Place(name='Main St', address='111 Main St')
place2.save_base(raw=True)
park = ParkingLot(parent=place2, capacity=100)
park.save_base(raw=True)
# Check that no extra parent objects have been created.
places = list(Place.objects.all())
self.assertEqual(places, [place1, place2])
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_hot_dogs': True
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_gnocchi': True,
'serves_hot_dogs': True,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 100,
'name': 'Main St',
}])
# You can also update objects when using a raw save.
place1.name = "Guido's All New House of Pasta"
place1.save_base(raw=True)
restaurant.serves_hot_dogs = False
restaurant.save_base(raw=True)
italian_restaurant.serves_gnocchi = False
italian_restaurant.save_base(raw=True)
place2.name = 'Derelict lot'
place2.save_base(raw=True)
park.capacity = 50
park.save_base(raw=True)
# No extra parent objects after an update, either.
places = list(Place.objects.all())
self.assertEqual(places, [place2, place1])
self.assertEqual(places[0].name, 'Derelict lot')
self.assertEqual(places[1].name, "Guido's All New House of Pasta")
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_hot_dogs': False,
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 50,
'name': 'Derelict lot',
}])
# If you try to raw_save a parent attribute onto a child object,
# the attribute will be ignored.
italian_restaurant.name = "Lorenzo's Pasta Hut"
italian_restaurant.save_base(raw=True)
# Note that the name has not changed
# - name is an attribute of Place, not ItalianRestaurant
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
def test_issue_7105(self):
# Regressions tests for #7105: dates() queries should be able to use
# fields from the parent model as easily as the child.
Child.objects.create(
name='child',
created=datetime.datetime(2008, 6, 26, 17, 0, 0))
datetimes = list(Child.objects.datetimes('created', 'month'))
self.assertEqual(datetimes, [datetime.datetime(2008, 6, 1, 0, 0)])
def test_issue_7276(self):
# Regression test for #7276: calling delete() on a model with
# multi-table inheritance should delete the associated rows from any
# ancestor tables, as well as any descendent objects.
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
ident = ItalianRestaurant.objects.all()[0].id
self.assertEqual(Place.objects.get(pk=ident), place1)
Restaurant.objects.create(
name='a',
address='xx',
serves_hot_dogs=True,
serves_pizza=False)
# This should delete both Restaurants, plus the related places, plus
# the ItalianRestaurant.
Restaurant.objects.all().delete()
with self.assertRaises(Place.DoesNotExist):
Place.objects.get(pk=ident)
with self.assertRaises(ItalianRestaurant.DoesNotExist):
ItalianRestaurant.objects.get(pk=ident)
def test_issue_6755(self):
"""
Regression test for #6755
"""
r = Restaurant(serves_pizza=False, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, r.place_ptr_id)
orig_id = r.id
r = Restaurant(place_ptr_id=orig_id, serves_pizza=True, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, orig_id)
self.assertEqual(r.id, r.place_ptr_id)
def test_issue_7488(self):
# Regression test for #7488. This looks a little crazy, but it's the
# equivalent of what the admin interface has to do for the edit-inline
# case.
suppliers = Supplier.objects.filter(
restaurant=Restaurant(name='xx', address='yy'))
suppliers = list(suppliers)
self.assertEqual(suppliers, [])
def test_issue_11764(self):
"""
Regression test for #11764
"""
wholesalers = list(Wholesaler.objects.all().select_related())
self.assertEqual(wholesalers, [])
def test_issue_7853(self):
"""
Regression test for #7853
If the parent class has a self-referential link, make sure that any
updates to that link via the child update the right table.
"""
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
obj.delete()
def test_get_next_previous_by_date(self):
"""
Regression tests for #8076
get_(next/previous)_by_date should work
"""
c1 = ArticleWithAuthor(
headline='ArticleWithAuthor 1',
author="Person 1",
pub_date=datetime.datetime(2005, 8, 1, 3, 0))
c1.save()
c2 = ArticleWithAuthor(
headline='ArticleWithAuthor 2',
author="Person 2",
pub_date=datetime.datetime(2005, 8, 1, 10, 0))
c2.save()
c3 = ArticleWithAuthor(
headline='ArticleWithAuthor 3',
author="Person 3",
pub_date=datetime.datetime(2005, 8, 2))
c3.save()
self.assertEqual(c1.get_next_by_pub_date(), c2)
self.assertEqual(c2.get_next_by_pub_date(), c3)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c3.get_next_by_pub_date()
self.assertEqual(c3.get_previous_by_pub_date(), c2)
self.assertEqual(c2.get_previous_by_pub_date(), c1)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c1.get_previous_by_pub_date()
def test_inherited_fields(self):
"""
Regression test for #8825 and #9390
Make sure all inherited fields (esp. m2m fields, in this case) appear
on the child class.
"""
m2mchildren = list(M2MChild.objects.filter(articles__isnull=False))
self.assertEqual(m2mchildren, [])
# Ordering should not include any database column more than once (this
# is most likely to occur naturally with model inheritance, so we
# check it here). Regression test for #9390. This necessarily pokes at
# the SQL string for the query, since the duplicate problems are only
# apparent at that late stage.
qs = ArticleWithAuthor.objects.order_by('pub_date', 'pk')
sql = qs.query.get_compiler(qs.db).as_sql()[0]
fragment = sql[sql.find('ORDER BY'):]
pos = fragment.find('pub_date')
self.assertEqual(fragment.find('pub_date', pos + 1), -1)
def test_queryset_update_on_parent_model(self):
"""
Regression test for #10362
It is possible to call update() and only change a field in
an ancestor model.
"""
article = ArticleWithAuthor.objects.create(
author="fred",
headline="Hey there!",
pub_date=datetime.datetime(2009, 3, 1, 8, 0, 0))
update = ArticleWithAuthor.objects.filter(
author="fred").update(headline="Oh, no!")
self.assertEqual(update, 1)
update = ArticleWithAuthor.objects.filter(
pk=article.pk).update(headline="Oh, no!")
self.assertEqual(update, 1)
derivedm1 = DerivedM.objects.create(
customPK=44,
base_name="b1",
derived_name="d1")
self.assertEqual(derivedm1.customPK, 44)
self.assertEqual(derivedm1.base_name, 'b1')
self.assertEqual(derivedm1.derived_name, 'd1')
derivedms = list(DerivedM.objects.all())
self.assertEqual(derivedms, [derivedm1])
def test_use_explicit_o2o_to_parent_as_pk(self):
"""
Regression tests for #10406
If there's a one-to-one link between a child model and the parent and
no explicit pk declared, we can use the one-to-one link as the pk on
the child.
"""
self.assertEqual(ParkingLot2._meta.pk.name, "parent")
# However, the connector from child to parent need not be the pk on
# the child at all.
self.assertEqual(ParkingLot3._meta.pk.name, "primary_key")
# the child->parent link
self.assertEqual(
ParkingLot3._meta.get_ancestor_link(Place).name,
"parent")
def test_use_explicit_o2o_to_parent_from_abstract_model(self):
self.assertEqual(ParkingLot4A._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4A",
address='21 Jump Street',
)
self.assertEqual(ParkingLot4B._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4B",
address='21 Jump Street',
)
def test_all_fields_from_abstract_base_class(self):
"""
Regression tests for #7588
"""
# All fields from an ABC, including those inherited non-abstractly
# should be available on child classes (#7588). Creating this instance
# should work without error.
QualityControl.objects.create(
headline="Problems in Django",
pub_date=datetime.datetime.now(),
quality=10,
assignee="adrian")
def test_abstract_base_class_m2m_relation_inheritance(self):
# Check that many-to-many relations defined on an abstract base class
# are correctly inherited (and created) on the child class.
p1 = Person.objects.create(name='Alice')
p2 = Person.objects.create(name='Bob')
p3 = Person.objects.create(name='Carol')
p4 = Person.objects.create(name='Dave')
birthday = BirthdayParty.objects.create(
name='Birthday party for Alice')
birthday.attendees.set([p1, p3])
bachelor = BachelorParty.objects.create(name='Bachelor party for Bob')
bachelor.attendees.set([p2, p4])
parties = list(p1.birthdayparty_set.all())
self.assertEqual(parties, [birthday])
parties = list(p1.bachelorparty_set.all())
self.assertEqual(parties, [])
parties = list(p2.bachelorparty_set.all())
self.assertEqual(parties, [bachelor])
# Check that a subclass of a subclass of an abstract model doesn't get
# its own accessor.
self.assertFalse(hasattr(p2, 'messybachelorparty_set'))
# ... but it does inherit the m2m from its parent
messy = MessyBachelorParty.objects.create(
name='Bachelor party for Dave')
messy.attendees.set([p4])
messy_parent = messy.bachelorparty_ptr
parties = list(p4.bachelorparty_set.all())
self.assertEqual(parties, [bachelor, messy_parent])
def test_abstract_verbose_name_plural_inheritance(self):
"""
verbose_name_plural correctly inherited from ABC if inheritance chain
includes an abstract model.
"""
# Regression test for #11369: verbose_name_plural should be inherited
# from an ABC even when there are one or more intermediate
# abstract models in the inheritance chain, for consistency with
# verbose_name.
self.assertEqual(
InternalCertificationAudit._meta.verbose_name_plural,
'Audits'
)
def test_inherited_nullable_exclude(self):
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
self.assertQuerysetEqual(
SelfRefParent.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
self.assertQuerysetEqual(
SelfRefChild.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
def test_concrete_abstract_concrete_pk(self):
"""
Primary key set correctly with concrete->abstract->concrete inheritance.
"""
# Regression test for #13987: Primary key is incorrectly determined
# when more than one model has a concrete->abstract->concrete
# inheritance hierarchy.
self.assertEqual(
len([field for field in BusStation._meta.local_fields if field.primary_key]),
1
)
self.assertEqual(
len([field for field in TrainStation._meta.local_fields if field.primary_key]),
1
)
self.assertIs(BusStation._meta.pk.model, BusStation)
self.assertIs(TrainStation._meta.pk.model, TrainStation)
def test_inherited_unique_field_with_form(self):
"""
Test that a model which has different primary key for the parent model
passes unique field checking correctly. Refs #17615.
"""
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = '__all__'
User.objects.create(username="user_only")
p = Profile.objects.create(username="user_with_profile")
form = ProfileForm({'username': "user_with_profile", 'extra': "hello"},
instance=p)
self.assertTrue(form.is_valid())
def test_inheritance_joins(self):
# Test for #17502 - check that filtering through two levels of
# inheritance chain doesn't generate extra joins.
qs = ItalianRestaurant.objects.all()
self.assertEqual(str(qs.query).count('JOIN'), 2)
qs = ItalianRestaurant.objects.filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 2)
@expectedFailure
def test_inheritance_values_joins(self):
# It would be nice (but not too important) to skip the middle join in
# this case. Skipping is possible as nothing from the middle model is
# used in the qs and top contains direct pointer to the bottom model.
qs = ItalianRestaurant.objects.values_list('serves_gnocchi').filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_issue_21554(self):
senator = Senator.objects.create(
name='John Doe', title='X', state='Y'
)
senator = Senator.objects.get(pk=senator.pk)
self.assertEqual(senator.name, 'John Doe')
self.assertEqual(senator.title, 'X')
self.assertEqual(senator.state, 'Y')
def test_inheritance_resolve_columns(self):
Restaurant.objects.create(name='Bobs Cafe', address="Somewhere",
serves_pizza=True, serves_hot_dogs=True)
p = Place.objects.all().select_related('restaurant')[0]
self.assertIsInstance(p.restaurant.serves_pizza, bool)
def test_inheritance_select_related(self):
# Regression test for #7246
r1 = Restaurant.objects.create(
name="Nobu", serves_hot_dogs=True, serves_pizza=False
)
r2 = Restaurant.objects.create(
name="Craft", serves_hot_dogs=False, serves_pizza=True
)
Supplier.objects.create(name="John", restaurant=r1)
Supplier.objects.create(name="Jane", restaurant=r2)
self.assertQuerysetEqual(
Supplier.objects.order_by("name").select_related(), [
"Jane",
"John",
],
attrgetter("name")
)
jane = Supplier.objects.order_by("name").select_related("restaurant")[0]
self.assertEqual(jane.restaurant.name, "Craft")
def test_related_filtering_query_efficiency_ticket_15844(self):
r = Restaurant.objects.create(
name="Guido's House of Pasta",
address='944 W. Fullerton',
serves_hot_dogs=True,
serves_pizza=False,
)
s = Supplier.objects.create(restaurant=r)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
Supplier.objects.filter(restaurant=r),
[s], lambda x: x,
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
r.supplier_set.all(),
[s], lambda x: x,
)
| 38.159136
| 91
| 0.622973
|
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from unittest import expectedFailure
from django import forms
from django.test import TestCase
from .models import (
ArticleWithAuthor, BachelorParty, BirthdayParty, BusStation, Child,
DerivedM, InternalCertificationAudit, ItalianRestaurant, M2MChild,
MessyBachelorParty, ParkingLot, ParkingLot2, ParkingLot3, ParkingLot4A,
ParkingLot4B, Person, Place, Profile, QualityControl, Restaurant,
SelfRefChild, SelfRefParent, Senator, Supplier, TrainStation, User,
Wholesaler,
)
class ModelInheritanceTest(TestCase):
def test_model_inheritance(self):
# the child. This behavior is only activated during a raw save - it
# is mostly relevant to deserialization, but any sort of CORBA style
# 'narrow()' API would require a similar approach.
# Create a child-parent-grandparent chain
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
place2 = Place(name='Main St', address='111 Main St')
place2.save_base(raw=True)
park = ParkingLot(parent=place2, capacity=100)
park.save_base(raw=True)
places = list(Place.objects.all())
self.assertEqual(places, [place1, place2])
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_hot_dogs': True
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_gnocchi': True,
'serves_hot_dogs': True,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 100,
'name': 'Main St',
}])
place1.name = "Guido's All New House of Pasta"
place1.save_base(raw=True)
restaurant.serves_hot_dogs = False
restaurant.save_base(raw=True)
italian_restaurant.serves_gnocchi = False
italian_restaurant.save_base(raw=True)
place2.name = 'Derelict lot'
place2.save_base(raw=True)
park.capacity = 50
park.save_base(raw=True)
# No extra parent objects after an update, either.
places = list(Place.objects.all())
self.assertEqual(places, [place2, place1])
self.assertEqual(places[0].name, 'Derelict lot')
self.assertEqual(places[1].name, "Guido's All New House of Pasta")
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_hot_dogs': False,
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 50,
'name': 'Derelict lot',
}])
italian_restaurant.name = "Lorenzo's Pasta Hut"
italian_restaurant.save_base(raw=True)
# Note that the name has not changed
# - name is an attribute of Place, not ItalianRestaurant
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
def test_issue_7105(self):
Child.objects.create(
name='child',
created=datetime.datetime(2008, 6, 26, 17, 0, 0))
datetimes = list(Child.objects.datetimes('created', 'month'))
self.assertEqual(datetimes, [datetime.datetime(2008, 6, 1, 0, 0)])
def test_issue_7276(self):
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
ident = ItalianRestaurant.objects.all()[0].id
self.assertEqual(Place.objects.get(pk=ident), place1)
Restaurant.objects.create(
name='a',
address='xx',
serves_hot_dogs=True,
serves_pizza=False)
# This should delete both Restaurants, plus the related places, plus
# the ItalianRestaurant.
Restaurant.objects.all().delete()
with self.assertRaises(Place.DoesNotExist):
Place.objects.get(pk=ident)
with self.assertRaises(ItalianRestaurant.DoesNotExist):
ItalianRestaurant.objects.get(pk=ident)
def test_issue_6755(self):
r = Restaurant(serves_pizza=False, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, r.place_ptr_id)
orig_id = r.id
r = Restaurant(place_ptr_id=orig_id, serves_pizza=True, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, orig_id)
self.assertEqual(r.id, r.place_ptr_id)
def test_issue_7488(self):
# Regression test for #7488. This looks a little crazy, but it's the
suppliers = Supplier.objects.filter(
restaurant=Restaurant(name='xx', address='yy'))
suppliers = list(suppliers)
self.assertEqual(suppliers, [])
def test_issue_11764(self):
wholesalers = list(Wholesaler.objects.all().select_related())
self.assertEqual(wholesalers, [])
def test_issue_7853(self):
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
obj.delete()
def test_get_next_previous_by_date(self):
c1 = ArticleWithAuthor(
headline='ArticleWithAuthor 1',
author="Person 1",
pub_date=datetime.datetime(2005, 8, 1, 3, 0))
c1.save()
c2 = ArticleWithAuthor(
headline='ArticleWithAuthor 2',
author="Person 2",
pub_date=datetime.datetime(2005, 8, 1, 10, 0))
c2.save()
c3 = ArticleWithAuthor(
headline='ArticleWithAuthor 3',
author="Person 3",
pub_date=datetime.datetime(2005, 8, 2))
c3.save()
self.assertEqual(c1.get_next_by_pub_date(), c2)
self.assertEqual(c2.get_next_by_pub_date(), c3)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c3.get_next_by_pub_date()
self.assertEqual(c3.get_previous_by_pub_date(), c2)
self.assertEqual(c2.get_previous_by_pub_date(), c1)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c1.get_previous_by_pub_date()
def test_inherited_fields(self):
m2mchildren = list(M2MChild.objects.filter(articles__isnull=False))
self.assertEqual(m2mchildren, [])
qs = ArticleWithAuthor.objects.order_by('pub_date', 'pk')
sql = qs.query.get_compiler(qs.db).as_sql()[0]
fragment = sql[sql.find('ORDER BY'):]
pos = fragment.find('pub_date')
self.assertEqual(fragment.find('pub_date', pos + 1), -1)
def test_queryset_update_on_parent_model(self):
article = ArticleWithAuthor.objects.create(
author="fred",
headline="Hey there!",
pub_date=datetime.datetime(2009, 3, 1, 8, 0, 0))
update = ArticleWithAuthor.objects.filter(
author="fred").update(headline="Oh, no!")
self.assertEqual(update, 1)
update = ArticleWithAuthor.objects.filter(
pk=article.pk).update(headline="Oh, no!")
self.assertEqual(update, 1)
derivedm1 = DerivedM.objects.create(
customPK=44,
base_name="b1",
derived_name="d1")
self.assertEqual(derivedm1.customPK, 44)
self.assertEqual(derivedm1.base_name, 'b1')
self.assertEqual(derivedm1.derived_name, 'd1')
derivedms = list(DerivedM.objects.all())
self.assertEqual(derivedms, [derivedm1])
def test_use_explicit_o2o_to_parent_as_pk(self):
self.assertEqual(ParkingLot2._meta.pk.name, "parent")
self.assertEqual(ParkingLot3._meta.pk.name, "primary_key")
self.assertEqual(
ParkingLot3._meta.get_ancestor_link(Place).name,
"parent")
def test_use_explicit_o2o_to_parent_from_abstract_model(self):
self.assertEqual(ParkingLot4A._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4A",
address='21 Jump Street',
)
self.assertEqual(ParkingLot4B._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4B",
address='21 Jump Street',
)
def test_all_fields_from_abstract_base_class(self):
QualityControl.objects.create(
headline="Problems in Django",
pub_date=datetime.datetime.now(),
quality=10,
assignee="adrian")
def test_abstract_base_class_m2m_relation_inheritance(self):
p1 = Person.objects.create(name='Alice')
p2 = Person.objects.create(name='Bob')
p3 = Person.objects.create(name='Carol')
p4 = Person.objects.create(name='Dave')
birthday = BirthdayParty.objects.create(
name='Birthday party for Alice')
birthday.attendees.set([p1, p3])
bachelor = BachelorParty.objects.create(name='Bachelor party for Bob')
bachelor.attendees.set([p2, p4])
parties = list(p1.birthdayparty_set.all())
self.assertEqual(parties, [birthday])
parties = list(p1.bachelorparty_set.all())
self.assertEqual(parties, [])
parties = list(p2.bachelorparty_set.all())
self.assertEqual(parties, [bachelor])
# its own accessor.
self.assertFalse(hasattr(p2, 'messybachelorparty_set'))
# ... but it does inherit the m2m from its parent
messy = MessyBachelorParty.objects.create(
name='Bachelor party for Dave')
messy.attendees.set([p4])
messy_parent = messy.bachelorparty_ptr
parties = list(p4.bachelorparty_set.all())
self.assertEqual(parties, [bachelor, messy_parent])
def test_abstract_verbose_name_plural_inheritance(self):
# Regression test for #11369: verbose_name_plural should be inherited
# from an ABC even when there are one or more intermediate
# abstract models in the inheritance chain, for consistency with
# verbose_name.
self.assertEqual(
InternalCertificationAudit._meta.verbose_name_plural,
'Audits'
)
def test_inherited_nullable_exclude(self):
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
self.assertQuerysetEqual(
SelfRefParent.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
self.assertQuerysetEqual(
SelfRefChild.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
def test_concrete_abstract_concrete_pk(self):
# Regression test for #13987: Primary key is incorrectly determined
# when more than one model has a concrete->abstract->concrete
# inheritance hierarchy.
self.assertEqual(
len([field for field in BusStation._meta.local_fields if field.primary_key]),
1
)
self.assertEqual(
len([field for field in TrainStation._meta.local_fields if field.primary_key]),
1
)
self.assertIs(BusStation._meta.pk.model, BusStation)
self.assertIs(TrainStation._meta.pk.model, TrainStation)
def test_inherited_unique_field_with_form(self):
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = '__all__'
User.objects.create(username="user_only")
p = Profile.objects.create(username="user_with_profile")
form = ProfileForm({'username': "user_with_profile", 'extra': "hello"},
instance=p)
self.assertTrue(form.is_valid())
def test_inheritance_joins(self):
# Test for #17502 - check that filtering through two levels of
# inheritance chain doesn't generate extra joins.
qs = ItalianRestaurant.objects.all()
self.assertEqual(str(qs.query).count('JOIN'), 2)
qs = ItalianRestaurant.objects.filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 2)
@expectedFailure
def test_inheritance_values_joins(self):
qs = ItalianRestaurant.objects.values_list('serves_gnocchi').filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_issue_21554(self):
senator = Senator.objects.create(
name='John Doe', title='X', state='Y'
)
senator = Senator.objects.get(pk=senator.pk)
self.assertEqual(senator.name, 'John Doe')
self.assertEqual(senator.title, 'X')
self.assertEqual(senator.state, 'Y')
def test_inheritance_resolve_columns(self):
Restaurant.objects.create(name='Bobs Cafe', address="Somewhere",
serves_pizza=True, serves_hot_dogs=True)
p = Place.objects.all().select_related('restaurant')[0]
self.assertIsInstance(p.restaurant.serves_pizza, bool)
def test_inheritance_select_related(self):
r1 = Restaurant.objects.create(
name="Nobu", serves_hot_dogs=True, serves_pizza=False
)
r2 = Restaurant.objects.create(
name="Craft", serves_hot_dogs=False, serves_pizza=True
)
Supplier.objects.create(name="John", restaurant=r1)
Supplier.objects.create(name="Jane", restaurant=r2)
self.assertQuerysetEqual(
Supplier.objects.order_by("name").select_related(), [
"Jane",
"John",
],
attrgetter("name")
)
jane = Supplier.objects.order_by("name").select_related("restaurant")[0]
self.assertEqual(jane.restaurant.name, "Craft")
def test_related_filtering_query_efficiency_ticket_15844(self):
r = Restaurant.objects.create(
name="Guido's House of Pasta",
address='944 W. Fullerton',
serves_hot_dogs=True,
serves_pizza=False,
)
s = Supplier.objects.create(restaurant=r)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
Supplier.objects.filter(restaurant=r),
[s], lambda x: x,
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
r.supplier_set.all(),
[s], lambda x: x,
)
| true
| true
|
f704431757b191fd6a6405e1724d23679ca1b2f0
| 1,173
|
py
|
Python
|
script/app/agg.py
|
Intelligent-Systems-Lab/ISL-BCFL
|
42ceb86708a76e28b31c22b33c15ee9a6a745ec7
|
[
"Apache-2.0"
] | null | null | null |
script/app/agg.py
|
Intelligent-Systems-Lab/ISL-BCFL
|
42ceb86708a76e28b31c22b33c15ee9a6a745ec7
|
[
"Apache-2.0"
] | null | null | null |
script/app/agg.py
|
Intelligent-Systems-Lab/ISL-BCFL
|
42ceb86708a76e28b31c22b33c15ee9a6a745ec7
|
[
"Apache-2.0"
] | null | null | null |
import os
# import torch
import argparse
import base64
import sys
import io
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def fullmodel2base64(model):
buffer = io.BytesIO()
torch.save(model, buffer)
bg = buffer.getvalue()
return base64.b64encode(bg).decode()
def base642fullmodel(modbase64):
    """Decode a base64 str produced by fullmodel2base64 back into the
    original torch object.
    """
    raw_bytes = base64.b64decode(modbase64.encode())
    return torch.load(io.BytesIO(raw_bytes))
# Federated-averaging driver: reads a comma-separated list of base64-encoded
# models from the file named by argv[1], averages their weights element-wise,
# and prints the averaged model re-encoded as base64.

model_list = []

# Use a context manager so the file handle is released even if reading fails
# (the original opened/closed the file manually).
# NOTE(review): assumes argv[1] names a readable file containing at least one
# model; an empty file would leave model_list empty and crash below.
with open(sys.argv[1], "r") as f:
    models = f.read().split(",")
print(models)

for m in models:
    model_list.append(base642fullmodel(m))

# Accumulate the weights of every model into the first model's state dict.
new_model_state = model_list[0].state_dict()
for m in model_list[1:]:
    state_m = m.state_dict()
    for key in state_m:
        new_model_state[key] += state_m[key]

# Average the accumulated weights.
for key in new_model_state:
    new_model_state[key] /= len(model_list)

new_model = model_list[0]
new_model.load_state_dict(new_model_state)

output = fullmodel2base64(new_model)
print(output)
| 19.55
| 56
| 0.734868
|
import os
import argparse
import base64
import sys
import io
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def fullmodel2base64(model):
buffer = io.BytesIO()
torch.save(model, buffer)
bg = buffer.getvalue()
return base64.b64encode(bg).decode()
def base642fullmodel(modbase64):
inputrpc = bytes(modbase64.encode())
inputrpc_ = base64.b64decode(inputrpc)
loadmodel = torch.load(io.BytesIO(inputrpc_))
return loadmodel
model_list = []
f = open(sys.argv[1], "r")
models = f.read().split(",")
f.close()
print(models)
for m in models:
model_list.append(base642fullmodel(m))
new_model_state = model_list[0].state_dict()
for m in model_list[1:]:
state_m = m.state_dict()
for key in state_m:
new_model_state[key] += state_m[key]
for key in new_model_state:
new_model_state[key] /= len(model_list)
new_model = model_list[0]
new_model.load_state_dict(new_model_state)
output = fullmodel2base64(new_model)
print(output)
| true
| true
|
f7044425b96c1b1b74a77404d1717095d1e2e08e
| 192
|
py
|
Python
|
q6.py
|
Babar-Awan/CP19_05
|
5d852cc4bac724aba3acec6bcefc2e3a1d3b0a58
|
[
"MIT"
] | null | null | null |
q6.py
|
Babar-Awan/CP19_05
|
5d852cc4bac724aba3acec6bcefc2e3a1d3b0a58
|
[
"MIT"
] | null | null | null |
q6.py
|
Babar-Awan/CP19_05
|
5d852cc4bac724aba3acec6bcefc2e3a1d3b0a58
|
[
"MIT"
] | null | null | null |
# Question No 6
# Prints the cumulative ocean rise for each of the next 25 years,
# assuming a constant rise of 1.6 millimeters per year.
millimeter = 1.6
for year in range(1, 26):
    years = year * millimeter
    print(" The ocean will rises each year is=", years,)
| 24
| 56
| 0.661458
|
year =1
millimeter= 1.6
while(year<=25):
years=(year * millimeter)
print(" The ocean will rises each year is=" , years,)
year+=1
| true
| true
|
f704446ccf2cd519c05582e5094cbf2d322f8140
| 1,530
|
py
|
Python
|
main.py
|
tomsaudrins/api-service
|
a1262b63b3c11bed373fe12547f3a41b6478d648
|
[
"MIT"
] | null | null | null |
main.py
|
tomsaudrins/api-service
|
a1262b63b3c11bed373fe12547f3a41b6478d648
|
[
"MIT"
] | null | null | null |
main.py
|
tomsaudrins/api-service
|
a1262b63b3c11bed373fe12547f3a41b6478d648
|
[
"MIT"
] | null | null | null |
# FastAPI application entry point: wires up CORS middleware and mounts one
# router per resource under its URL prefix.
from fastapi import FastAPI
import uvicorn
from src.routes import (
    user,
    employee,
    car,
    inventory,
    product,
    service,
    dealership,
    department,
)
from fastapi.middleware.cors import CORSMiddleware
from src.settings.envvariables import Settings

# Fail fast at startup if required environment variables are missing.
Settings().check_variables()
app = FastAPI()

# NOTE(review): allow_origins=["*"] together with allow_credentials=True is a
# wide-open CORS policy — confirm this is intended outside of development.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Include/define our routes
app.include_router(user.app, prefix="/users", tags=["Users"])
app.include_router(employee.app, prefix="/employees", tags=["Employees"])
app.include_router(car.app, prefix="/cars", tags=["Cars"])
app.include_router(inventory.app, prefix="/inventory", tags=["Inventory"])
app.include_router(product.app, prefix="/products", tags=["Product"])
app.include_router(service.app, prefix="/services/requests", tags=["Service"])
app.include_router(dealership.app, prefix="/dealerships", tags=["Dealership"])
app.include_router(department.app, prefix="/departments", tags=["Department"])

# Launch the app with uvicorn and handle environment.
# Kept for reference: the service is currently launched externally (e.g. by a
# process manager), so the embedded uvicorn bootstrap is disabled.
# if Settings().ENV == "prod":
#     if __name__ == "__main__":
#         print("Launching Production Environment")
#         uvicorn.run("main:app", host="0.0.0.0", port=Settings().PORT, reload=False, workers=3)
# else:
#     if __name__ == "__main__":
#         print("Launching Development Environment")
#         uvicorn.run("main:app", host="0.0.0.0", port=Settings().PORT, reload=True, workers=1)
| 32.553191
| 96
| 0.698039
|
from fastapi import FastAPI
import uvicorn
from src.routes import (
user,
employee,
car,
inventory,
product,
service,
dealership,
department,
)
from fastapi.middleware.cors import CORSMiddleware
from src.settings.envvariables import Settings
Settings().check_variables()
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(user.app, prefix="/users", tags=["Users"])
app.include_router(employee.app, prefix="/employees", tags=["Employees"])
app.include_router(car.app, prefix="/cars", tags=["Cars"])
app.include_router(inventory.app, prefix="/inventory", tags=["Inventory"])
app.include_router(product.app, prefix="/products", tags=["Product"])
app.include_router(service.app, prefix="/services/requests", tags=["Service"])
app.include_router(dealership.app, prefix="/dealerships", tags=["Dealership"])
app.include_router(department.app, prefix="/departments", tags=["Department"])
| true
| true
|
f70444a08deb7e59f97195740767e6b3556a8c02
| 4,464
|
py
|
Python
|
server/apps/verticals/shipping/utils/org_quality_report.py
|
iotile/iotile_cloud
|
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
|
[
"MIT"
] | null | null | null |
server/apps/verticals/shipping/utils/org_quality_report.py
|
iotile/iotile_cloud
|
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
|
[
"MIT"
] | null | null | null |
server/apps/verticals/shipping/utils/org_quality_report.py
|
iotile/iotile_cloud
|
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
|
[
"MIT"
] | null | null | null |
from django.db.models import Q
from apps.configattribute.models import ConfigAttribute
from apps.property.models import GenericProperty
from apps.utils.data_helpers.manager import DataManager
from apps.utils.iotile.variable import SYSTEM_VID
from apps.utils.timezone_utils import display_formatted_ts
class TripInfo(object):
    """Collects quality-report data for one archived trip (data block):
    the most recent trip-summary event plus any selected trip properties.
    """

    # Class-level defaults preserved from the original interface.
    block = None
    data = {}
    slug = None
    last_update = None

    def __init__(self, block):
        self.block = block
        self.slug = block.slug
        # Per-instance payload: latest summary event data and properties.
        self.data = {'summary': {}, 'properties': {}}
        self.last_update = None

    def add_property(self, key, value):
        """Records one trip property under the given key."""
        self.data['properties'][key] = value

    def add_summary_event(self, event):
        """Stores the event's payload as the trip summary, keeping only the
        newest event seen so far.
        """
        if 'summary' not in self.data:
            return
        if self.last_update and self.last_update > event.timestamp:
            # A newer summary is already recorded; ignore this one.
            return
        self.data['summary'] = event.extra_data
        # Trip Summary should win over Trip Update.
        self.last_update = event.timestamp

    def to_representation(self):
        """Returns a plain dict view of this trip for serialization."""
        formatted_date = ''
        if self.last_update:
            formatted_date = display_formatted_ts(self.last_update)
        return {
            'slug': self.slug,
            'label': self.block.title,
            'summary_date': formatted_date,
            'data': self.data,
        }
class TripOrgQualityReport(object):
    """Builds a per-archive trip-quality report for one organization."""

    # Class-level defaults preserved from the original interface.
    org = None
    results = {}
    config = {}

    def __init__(self, org):
        self.org = org
        self.results = {}
        self.config = self._get_config_attributes()

    def _get_config_attributes(self):
        """Fetches the org's report configuration, or a built-in default
        when no config attribute exists.
        """
        attribute = ConfigAttribute.objects.get_attribute_by_priority(
            name=':report:trip_quality:config',
            target_slug=self.org.obj_target_slug)
        if attribute:
            return attribute.data
        # Fallback configuration when the org has not defined one.
        return {
            'summary_keys': [
                "Device",
                "START (UTC)",
                "END (UTC)",
                "Duration (Days)",
                "Event Count",
                "First event at (UTC)",
                "Last event at (UTC)",
                "Max Humidity (% RH)",
                "Min Humidity (% RH)",
                "Median Humidity (% RH)",
                "Max Pressure (Mbar)",
                "Min Pressure (Mbar)",
                "Median Pressure (Mbar)",
                "Max Temp (C)",
                "Min Temp (C)",
                "Median Temp (C)",
                "Above 30C",
                "Below 17C",
                "Max Peak (G)",
                "TimeStamp(MaxPeak) (UTC)",
                "DeltaV at Max Peak (in/s)",
                "MaxDeltaV (in/s)",
                "TimeStamp(MaxDeltaV) (UTC)",
                "Peak at MaxDeltaV (G)"
            ],
            'property_keys': []
        }

    def analyze(self):
        """Fills self.results with one TripInfo per archive, populated with
        the configured trip properties and the latest trip-summary event,
        then drops archives that never produced a summary.
        """
        blocks = self.org.data_blocks.all()
        block_slugs = []
        for block in blocks:
            block_slugs.append(block.slug)
            self.results[block.slug] = TripInfo(block)

        # Attach the configured properties to each matching archive.
        if self.config and 'property_keys' in self.config:
            for key_name in self.config['property_keys']:
                matching = GenericProperty.objects.filter(
                    target__in=block_slugs, name=key_name)
                for prop in matching:
                    self.results[prop.target].add_property(key_name, prop.value)

        # Not great, but we seem to have blocks with project as None and
        # blocks as p--0000, so match both "empty project" spellings.
        empty_project = Q(project_slug='') | Q(project_slug='p--0000-0000')
        summary_filter = empty_project & Q(
            device_slug__in=block_slugs,
            variable_slug__icontains=SYSTEM_VID['TRIP_SUMMARY'])
        for event in DataManager.filter_qs_using_q('event', q=summary_filter):
            self.results[event.device_slug].add_summary_event(event)

        # Drop archives that do not represent a real trip (no summary event).
        incomplete = [
            slug for slug, trip in self.results.items()
            if trip.data['summary'] == {}
        ]
        for slug in incomplete:
            del self.results[slug]
| 33.56391
| 125
| 0.5625
|
from django.db.models import Q
from apps.configattribute.models import ConfigAttribute
from apps.property.models import GenericProperty
from apps.utils.data_helpers.manager import DataManager
from apps.utils.iotile.variable import SYSTEM_VID
from apps.utils.timezone_utils import display_formatted_ts
class TripInfo(object):
block = None
data = {}
slug = None
last_update = None
def __init__(self, block):
self.block = block
self.slug = block.slug
self.data = {
'summary': {},
'properties': {}
}
self.last_update = None
def add_property(self, key, value):
self.data['properties'][key] = value
def add_summary_event(self, event):
if 'summary' in self.data:
if self.last_update and self.last_update > event.timestamp:
return
self.data['summary'] = event.extra_data
self.last_update = event.timestamp
def to_representation(self):
data = {
'slug': self.slug,
'label': self.block.title,
'summary_date': display_formatted_ts(self.last_update) if self.last_update else '',
'data': self.data
}
return data
class TripOrgQualityReport(object):
org = None
results = {}
config = {}
def __init__(self, org):
self.org = org
self.results = {}
self.config = self._get_config_attributes()
def _get_config_attributes(self):
config_name = ':report:trip_quality:config'
attribute = ConfigAttribute.objects.get_attribute_by_priority(name=config_name, target_slug=self.org.obj_target_slug)
if attribute:
return attribute.data
return {
'summary_keys': [
"Device",
"START (UTC)",
"END (UTC)",
"Duration (Days)",
"Event Count",
"First event at (UTC)",
"Last event at (UTC)",
"Max Humidity (% RH)",
"Min Humidity (% RH)",
"Median Humidity (% RH)",
"Max Pressure (Mbar)",
"Min Pressure (Mbar)",
"Median Pressure (Mbar)",
"Max Temp (C)",
"Min Temp (C)",
"Median Temp (C)",
"Above 30C",
"Below 17C",
"Max Peak (G)",
"TimeStamp(MaxPeak) (UTC)",
"DeltaV at Max Peak (in/s)",
"MaxDeltaV (in/s)",
"TimeStamp(MaxDeltaV) (UTC)",
"Peak at MaxDeltaV (G)"
],
'property_keys': []
}
def analyze(self):
blocks = self.org.data_blocks.all()
for block in blocks:
self.results[block.slug] = TripInfo(block)
block_slugs = [block.slug for block in blocks]
if self.config and 'property_keys' in self.config:
for property_item in self.config['property_keys']:
properties = GenericProperty.objects.filter(target__in=block_slugs, name=property_item)
for p in properties:
self.results[p.target].add_property(property_item, p.value)
q = Q(project_slug='') | Q(project_slug='p--0000-0000')
q = q & Q(device_slug__in=block_slugs, variable_slug__icontains=SYSTEM_VID['TRIP_SUMMARY'])
events = DataManager.filter_qs_using_q(
'event',
q=q
)
for event in events:
self.results[event.device_slug].add_summary_event(event)
to_delete = []
for slug, trip in self.results.items():
if trip.data['summary'] == {}:
# Delete Archive that does not represent a real trip
to_delete.append(slug)
for slug in to_delete:
del(self.results[slug])
| true
| true
|
f704457c6cc7a2334902e0a96a793b9399fd41ce
| 157,354
|
py
|
Python
|
core/tests/test_utils.py
|
luccasparoni/oppia
|
988f7c1e818faf774ec424e33b5dd0267c40237b
|
[
"Apache-2.0"
] | null | null | null |
core/tests/test_utils.py
|
luccasparoni/oppia
|
988f7c1e818faf774ec424e33b5dd0267c40237b
|
[
"Apache-2.0"
] | null | null | null |
core/tests/test_utils.py
|
luccasparoni/oppia
|
988f7c1e818faf774ec424e33b5dd0267c40237b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import collections
import contextlib
import copy
import inspect
import itertools
import json
import logging
import os
import re
import unittest
from constants import constants
from core.controllers import base
from core.domain import auth_domain
from core.domain import caching_domain
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import interaction_registry
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.search import elastic_search_services
from core.platform.taskqueue import cloud_tasks_emulator
import feconf
import main
import main_mail
import main_taskqueue
from proto import text_classifier_pb2
import python_utils
import schema_utils
import utils
import contextlib2
import elasticsearch
from google.appengine.api import mail
from google.appengine.ext import deferred
from google.appengine.ext import testbed
import requests_mock
import webtest
(
auth_models, exp_models, feedback_models, question_models, skill_models,
story_models, suggestion_models, topic_models,) = (
models.Registry.import_models([
models.NAMES.auth, models.NAMES.exploration, models.NAMES.feedback,
models.NAMES.question, models.NAMES.skill, models.NAMES.story,
models.NAMES.suggestion, models.NAMES.topic]))
current_user_services = models.Registry.import_current_user_services()
datastore_services = models.Registry.import_datastore_services()
email_services = models.Registry.import_email_services()
memory_cache_services = models.Registry.import_cache_services()
platform_auth_services = models.Registry.import_auth_services()
platform_taskqueue_services = models.Registry.import_taskqueue_services()
# Prefix to append to all lines printed by tests to the console.
# We are using the b' prefix as all the stdouts are in bytes.
LOG_LINE_PREFIX = b'LOG_INFO_TEST: '

# List of model classes that don't have Wipeout or Takeout, related class
# methods defined because they're not used directly but only as
# base classes for the other models. Checks that enforce data policies skip
# any class whose name appears here.
BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES = (
    'BaseCommitLogEntryModel',
    'BaseHumanMaintainedModel',
    'BaseMapReduceBatchResultsModel',
    'BaseModel',
    'BaseSnapshotContentModel',
    'BaseSnapshotMetadataModel',
    'VersionedModel',
)
def get_filepath_from_filename(filename, rootdir):
    """Walks rootdir recursively and returns the full path of the single
    file whose basename equals `filename`.

    For example signup-page.mainpage.html lives in
    core/templates/pages/signup-page, so walking core/templates/pages finds
    it inside that subdirectory.

    Args:
        filename: str. The name of the file.
        rootdir: str. The directory to search the file in.

    Returns:
        str | None. The path of the file if file is found otherwise
        None.

    Raises:
        Exception. More than one file with that name exists under rootdir.
    """
    # Error files are served per status code (error-page-404.mainpage.html,
    # etc.) but are all compiled from one template, so normalize any
    # error-page-* request to the real template filename.
    if filename.startswith('error-page'):
        filename = 'error-page.mainpage.html'
    matches = []
    for subdir, _, filenames in os.walk(rootdir):
        for candidate in filenames:
            if candidate == filename:
                matches.append(os.path.join(subdir, candidate))
    if len(matches) > 1:
        raise Exception('Multiple files found with name: %s' % filename)
    return matches[0] if matches else None
def mock_load_template(filename):
    """Mock for load_template used in backend tests.

    Backend tests run without webpack compilation, so the compiled
    webpack_bundles directory does not exist and the real load_template
    would fail. Instead, serve the template straight from the source tree.

    Args:
        filename: str. The name of the file for which template is to be
            returned.

    Returns:
        str. The contents of the given file.
    """
    template_path = get_filepath_from_filename(
        filename, os.path.join('core', 'templates', 'pages'))
    with python_utils.open_file(template_path, 'r') as template_file:
        return template_file.read()
def check_image_png_or_webp(image_string):
    """Checks whether the given data URL encodes a PNG or WebP image.

    Args:
        image_string: str. Image url in base64 format.

    Returns:
        bool. True when the data URL declares a png or webp payload.
    """
    is_png = image_string.startswith('data:image/png')
    is_webp = image_string.startswith('data:image/webp')
    return is_png or is_webp
def get_storage_model_module_names():
    """Yields the name of every module in storage."""
    # models.NAMES is an enum-like object and cannot be iterated directly,
    # so walk its __dict__ and skip the dunder entries.
    for module_name in models.NAMES.__dict__:
        if '__' in module_name:
            continue
        yield module_name
def get_storage_model_classes():
    """Yields every model class defined in the storage layer."""
    for module_name in get_storage_model_module_names():
        (module,) = models.Registry.import_models([module_name])
        for attr_name, attr_value in inspect.getmembers(module):
            if not inspect.isclass(attr_value):
                continue
            candidate = getattr(module, attr_name)
            # A storage model is any class with 'Model' in its MRO.
            ancestor_names = [
                ancestor.__name__ for ancestor in inspect.getmro(candidate)]
            if 'Model' in ancestor_names:
                yield candidate
class ElasticSearchStub(python_utils.OBJECT):
    """This stub class mocks the functionality of ES in
    elastic_search_services.py.

    IMPORTANT NOTE TO DEVELOPERS: These mock functions are NOT guaranteed to
    be exact implementations of elasticsearch functionality. If the results of
    this mock and the local dev elasticsearch instance differ, the mock
    functions should be updated so that their behaviour matches what a local
    dev instance would return. (For example, this mock always has a 'version'
    of 1 in the return dict and an arbitrary '_seq_no', although the version
    number increments with every PUT in the elasticsearch Python client
    library and the '_seq_no' increments with every operation.)
    """

    # NOTE: class-level attribute, so the mock database is shared by every
    # instance of the stub; call reset() between tests.
    _DB = {}

    def reset(self):
        """Helper method that clears the mock database."""
        self._DB.clear()

    def _generate_index_not_found_error(self, index_name):
        """Helper method that generates an elasticsearch 'index not found' 404
        error.

        Args:
            index_name: str. The index that was not found.

        Returns:
            elasticsearch.NotFoundError. A manually-constructed error
            indicating that the index was not found.
        """
        raise elasticsearch.NotFoundError(
            404, 'index_not_found_exception', {
                'status': 404,
                'error': {
                    'reason': 'no such index [%s]' % index_name,
                    'root_cause': [{
                        'reason': 'no such index [%s]' % index_name,
                        'index': index_name,
                        'index_uuid': '_na_',
                        'type': 'index_not_found_exception',
                        'resource.type': 'index_or_alias',
                        'resource.id': index_name
                    }],
                    'index': index_name,
                    'index_uuid': '_na_',
                    'type': 'index_not_found_exception',
                    'resource.type': 'index_or_alias',
                    'resource.id': index_name
                }
            }
        )

    def mock_create_index(self, index_name):
        """Creates an index with the given name.

        Args:
            index_name: str. The name of the index to create.

        Returns:
            dict. A dict representing the ElasticSearch API response.

        Raises:
            elasticsearch.RequestError. An index with the given name already
                exists.
        """
        if index_name in self._DB:
            raise elasticsearch.RequestError(
                400, 'resource_already_exists_exception',
                'index [%s/RaNdOmStRiNgOfAlPhAs] already exists' % index_name)
        self._DB[index_name] = []
        return {
            'index': index_name,
            'acknowledged': True,
            'shards_acknowledged': True
        }

    def mock_index(self, index_name, document, id=None):  # pylint: disable=redefined-builtin
        """Adds a document with the given ID to the index.

        Note that, unfortunately, we have to keep the name of "id" for the
        last kwarg, although it conflicts with a Python builtin. This is
        because the name is an existing part of the API defined at
        https://elasticsearch-py.readthedocs.io/en/v7.10.1/api.html

        Args:
            index_name: str. The name of the index to create.
            document: dict. The document to store.
            id: str. The unique identifier of the document.

        Returns:
            dict. A dict representing the ElasticSearch API response.

        Raises:
            elasticsearch.NotFoundError. The given index name was not found.
        """
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        # Replace any existing document with the same id (upsert semantics).
        self._DB[index_name] = [
            d for d in self._DB[index_name] if d['id'] != id]
        self._DB[index_name].append(document)
        return {
            '_index': index_name,
            '_shards': {
                'total': 2,
                'successful': 1,
                'failed': 0,
            },
            '_seq_no': 96,
            '_primary_term': 1,
            'result': 'created',
            '_id': id,
            '_version': 1,
            '_type': '_doc',
        }

    def mock_exists(self, index_name, doc_id):
        """Checks whether a document with the given ID exists in the mock
        database.

        Args:
            index_name: str. The name of the index to check.
            doc_id: str. The document id to check.

        Returns:
            bool. Whether the document exists in the index.

        Raises:
            elasticsearch.NotFoundError: The given index name was not found.
        """
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        # Generator form avoids building an intermediate list.
        return any(d['id'] == doc_id for d in self._DB[index_name])

    def mock_delete(self, index_name, doc_id):
        """Deletes a document from an index in the mock database. Does nothing
        if the document is not in the index.

        Args:
            index_name: str. The name of the index to delete the document from.
            doc_id: str. The document id to be deleted from the index.

        Returns:
            dict. A dict representing the ElasticSearch API response.

        Raises:
            Exception. The document does not exist in the index.
            elasticsearch.NotFoundError. The given index name was not found, or
                the given doc_id was not found in the given index.
        """
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        docs = [d for d in self._DB[index_name] if d['id'] != doc_id]
        if len(self._DB[index_name]) != len(docs):
            self._DB[index_name] = docs
            return {
                '_type': '_doc',
                '_seq_no': 99,
                '_shards': {
                    'total': 2,
                    'successful': 1,
                    'failed': 0
                },
                'result': 'deleted',
                '_primary_term': 1,
                '_index': index_name,
                '_version': 4,
                '_id': '0'
            }

        raise elasticsearch.NotFoundError(
            404, {
                '_index': index_name,
                '_type': '_doc',
                '_id': doc_id,
                '_version': 1,
                'result': 'not_found',
                '_shards': {
                    'total': 2,
                    'successful': 1,
                    'failed': 0
                },
                '_seq_no': 103,
                '_primary_term': 1
            })

    def mock_delete_by_query(self, index_name, query):
        """Deletes documents from an index based on the given query.

        Note that this mock only supports a specific for the query, i.e. the
        one which clears the entire index. It asserts that all calls to this
        function use that query format.

        Args:
            index_name: str. The name of the index to delete the documents
                from.
            query: dict. The query that defines which documents to delete.

        Returns:
            dict. A dict representing the ElasticSearch response.

        Raises:
            AssertionError. The query is not in the correct form.
            elasticsearch.NotFoundError. The given index name was not found.
        """
        # dict.keys() returns a view on Python 3, which never compares equal
        # to a list; converting to a list keeps this assertion meaningful on
        # both Python 2 and 3.
        assert list(query.keys()) == ['query']
        assert query['query'] == {
            'match_all': {}
        }
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        index_size = len(self._DB[index_name])
        del self._DB[index_name][:]
        return {
            'took': 72,
            'version_conflicts': 0,
            'noops': 0,
            'throttled_until_millis': 0,
            'failures': [],
            'throttled_millis': 0,
            'total': index_size,
            'batches': 1,
            'requests_per_second': -1.0,
            'retries': {u'search': 0, u'bulk': 0},
            'timed_out': False,
            'deleted': index_size
        }

    def mock_search(self, body=None, index=None, params=None):
        """Searches and returns documents that match the given query.

        Args:
            body: dict. A dictionary search definition that uses Query DSL.
            index: str. The name of the index to search.
            params: dict. A dict with two keys: `size` and `from`. The
                corresponding values are ints which represent the number of
                results to fetch, and the offset from which to fetch them,
                respectively.

        Returns:
            dict. A dict representing the ElasticSearch response.

        Raises:
            AssertionError. The given arguments are not supported by this mock.
            elasticsearch.NotFoundError. The given index name was not found.
        """
        assert body is not None
        # "_all" and "" are special index names that are used to search across
        # all indexes. We do not allow their use.
        assert index not in ['_all', '', None]
        assert sorted(params.keys()) == ['from', 'size']

        if index not in self._DB:
            raise self._generate_index_not_found_error(index)

        # De-duplicate documents by id while preserving insertion order.
        result_docs = []
        result_doc_ids = set()
        for doc in self._DB[index]:
            if doc['id'] not in result_doc_ids:
                result_docs.append(doc)
                result_doc_ids.add(doc['id'])

        filters = body['query']['bool']['filter']
        terms = body['query']['bool']['must']

        for f in filters:
            for k, v in f['match'].items():
                result_docs = [doc for doc in result_docs if doc[k] in v]

        if terms:
            filtered_docs = []
            for term in terms:
                for _, v in term.items():
                    values = v['query'].split(' ')
                    for doc in result_docs:
                        strs = [val for val in doc.values() if isinstance(
                            val, python_utils.BASESTRING)]
                        words = []
                        for s in strs:
                            words += s.split(' ')
                        if all(value in words for value in values):
                            filtered_docs.append(doc)
            result_docs = filtered_docs

        formatted_result_docs = [{
            '_id': doc['id'],
            '_score': 0.0,
            '_type': '_doc',
            '_index': index,
            '_source': doc
        } for doc in result_docs[
            params['from']: params['from'] + params['size']
        ]]

        return {
            'timed_out': False,
            '_shards': {
                'failed': 0,
                'total': 1,
                'successful': 1,
                'skipped': 0
            },
            'took': 4,
            'hits': {
                'hits': formatted_result_docs
            },
            'total': {
                'value': len(formatted_result_docs),
                'relation': 'eq'
            },
            'max_score': max(
                [0.0] + [d['_score'] for d in formatted_result_docs]),
        }
class AuthServicesStub(python_utils.OBJECT):
"""Test-only implementation of the public API in core.platform.auth."""
    def __init__(self):
        """Initializes a new instance that emulates an empty auth server."""
        # Maps auth IDs to their associated Oppia user IDs.
        self._user_id_by_auth_id = {}
        # User IDs that still hold an "external" (non-Oppia) association.
        self._external_user_id_associations = set()
@classmethod
def install_stub(cls, test):
"""Installs a new instance of the stub onto the given test instance.
Args:
test: GenericTestBase. The test instance to install the stub on.
Returns:
callable. A function that will uninstall the stub when called.
"""
with contextlib2.ExitStack() as stack:
stub = cls()
stack.enter_context(test.swap(
platform_auth_services, 'establish_auth_session',
stub.establish_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'destroy_auth_session',
stub.destroy_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_claims_from_request',
stub.get_auth_claims_from_request))
stack.enter_context(test.swap(
platform_auth_services, 'mark_user_for_deletion',
stub.mark_user_for_deletion))
stack.enter_context(test.swap(
platform_auth_services, 'delete_external_auth_associations',
stub.delete_external_auth_associations))
stack.enter_context(test.swap(
platform_auth_services,
'verify_external_auth_associations_are_deleted',
stub.verify_external_auth_associations_are_deleted))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_id_from_user_id',
stub.get_auth_id_from_user_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_user_id_from_auth_id',
stub.get_user_id_from_auth_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_user_ids_from_auth_ids',
stub.get_multi_user_ids_from_auth_ids))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_auth_ids_from_user_ids',
stub.get_multi_auth_ids_from_user_ids))
stack.enter_context(test.swap(
platform_auth_services, 'associate_auth_id_with_user_id',
stub.associate_auth_id_with_user_id))
stack.enter_context(test.swap(
platform_auth_services,
'associate_multi_auth_ids_with_user_ids',
stub.associate_multi_auth_ids_with_user_ids))
# Standard usage of ExitStack: enter a bunch of context managers
# from the safety of an ExitStack's context. Once they've all been
# opened, pop_all() of them off of the original context so they can
# *stay* open. Calling the function returned will exit all of them
# in reverse order.
# https://docs.python.org/3/library/contextlib.html#cleaning-up-in-an-enter-implementation
return stack.pop_all().close
    @classmethod
    def establish_auth_session(cls, unused_request, unused_response):
        """Sets login cookies to maintain a user's sign-in session.

        Args:
            unused_request: webapp2.Request. Unused because os.environ handles
                sessions.
            unused_response: webapp2.Response. Unused because os.environ
                handles sessions.
        """
        # Intentionally a no-op: the stub reads session state from os.environ
        # (see get_auth_claims_from_request), so nothing needs to be set here.
        pass
    @classmethod
    def destroy_auth_session(cls, unused_response):
        """Clears login cookies from the given response headers.

        Args:
            unused_response: webapp2.Response. Unused because os.environ
                handles sessions.
        """
        # Intentionally a no-op: session state lives in os.environ for tests,
        # so there are no real cookies to clear.
        pass
@classmethod
def get_auth_claims_from_request(cls, unused_request):
"""Authenticates the request and returns claims about its authorizer.
This stub obtains authorization information from os.environ. To make the
operation more authentic, this method also creates a new "external"
association for the user to simulate a genuine "provided" value.
Args:
unused_request: webapp2.Request. The HTTP request to authenticate.
Unused because auth-details are extracted from environment
variables.
Returns:
AuthClaims|None. Claims about the currently signed in user. If no
user is signed in, then returns None.
"""
auth_id = os.environ.get('USER_ID', '')
email = os.environ.get('USER_EMAIL', '')
role_is_super_admin = os.environ.get('USER_IS_ADMIN', '0') == '1'
if auth_id:
return auth_domain.AuthClaims(auth_id, email, role_is_super_admin)
return None
def mark_user_for_deletion(self, user_id):
"""Marks the user, and all of their auth associations, as deleted.
Since the stub does not use models, this operation actually deletes the
user's association. The "external" associations, however, are not
deleted yet.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._user_id_by_auth_id = {
a: u for a, u in self._user_id_by_auth_id.items() if u != user_id
}
def delete_external_auth_associations(self, user_id):
"""Deletes all associations that refer to the user outside of Oppia.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._external_user_id_associations.discard(user_id)
def verify_external_auth_associations_are_deleted(self, user_id):
"""Returns true if and only if we have successfully verified that all
external associations have been deleted.
Args:
user_id: str. The unique ID of the user whose associations should be
checked.
Returns:
bool. True if and only if we have successfully verified that all
external associations have been deleted.
"""
return user_id not in self._external_user_id_associations
def get_auth_id_from_user_id(self, user_id):
"""Returns the auth ID associated with the given user ID.
Args:
user_id: str. The user ID.
Returns:
str|None. The auth ID associated with the given user ID, or None if
no association exists.
"""
return python_utils.NEXT(
(a for a, u in self._user_id_by_auth_id.items() if u == user_id),
None)
def get_user_id_from_auth_id(self, auth_id):
"""Returns the user ID associated with the given auth ID.
Args:
auth_id: str. The auth ID.
Returns:
str|None. The user ID associated with the given auth ID, or None if
no association exists.
"""
return self._user_id_by_auth_id.get(auth_id, None)
def get_multi_user_ids_from_auth_ids(self, auth_ids):
"""Returns the user IDs associated with the given auth IDs.
Args:
auth_ids: list(str). The auth IDs.
Returns:
list(str|None). The user IDs associated with each of the given auth
IDs, or None for associations which don't exist.
"""
return [self._user_id_by_auth_id.get(a, None) for a in auth_ids]
def get_multi_auth_ids_from_user_ids(self, user_ids):
"""Returns the auth IDs associated with the given user IDs.
Args:
user_ids: list(str). The user IDs.
Returns:
list(str|None). The auth IDs associated with each of the given user
IDs, or None for associations which don't exist.
"""
auth_id_by_user_id = {u: a for a, u in self._user_id_by_auth_id.items()}
return [auth_id_by_user_id.get(u, None) for u in user_ids]
def associate_auth_id_with_user_id(self, auth_id_user_id_pair):
"""Commits the association between auth ID and user ID.
This method also adds the user to the "external" set of associations.
Args:
auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The association
to commit.
Raises:
Exception. The IDs are already associated with a value.
"""
auth_id, user_id = auth_id_user_id_pair
if auth_id in self._user_id_by_auth_id:
raise Exception(
'auth_id=%r is already associated with user_id=%r' % (
auth_id, self._user_id_by_auth_id[auth_id]))
auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id).put()
self._external_user_id_associations.add(user_id)
self._user_id_by_auth_id[auth_id] = user_id
def associate_multi_auth_ids_with_user_ids(self, auth_id_user_id_pairs):
"""Commits the associations between auth IDs and user IDs.
This method also adds the users to the "external" set of associations.
Args:
auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
associations to commit.
Raises:
Exception. One or more auth associations already exist.
"""
collisions = ', '.join(
'{auth_id=%r: user_id=%r}' % (a, self._user_id_by_auth_id[a])
for a, _ in auth_id_user_id_pairs if a in self._user_id_by_auth_id)
if collisions:
raise Exception('already associated: %s' % collisions)
datastore_services.put_multi(
[auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id)
for auth_id, user_id in auth_id_user_id_pairs])
self._external_user_id_associations.add(
u for _, u in auth_id_user_id_pairs)
self._user_id_by_auth_id.update(auth_id_user_id_pairs)
class TaskqueueServicesStub(python_utils.OBJECT):
    """Stub that replaces the platform.taskqueue services API with an
    in-process cloud tasks emulator, so tests can run tasks synchronously.
    """

    def __init__(self, test_base):
        """Builds the stub around a cloud tasks emulator whose tasks are
        dispatched through the given test base.

        Args:
            test_base: GenericTestBase. The current test base.
        """
        self._test_base = test_base
        self._client = cloud_tasks_emulator.Emulator(
            automatic_task_handling=False, task_handler=self._task_handler)

    def _task_handler(self, url, payload, queue_name, task_name=None):
        """Dispatches a task by POSTing its payload to the test app.

        Args:
            url: str. URL of the handler function.
            payload: dict(str : *). Payload to pass to the request. Defaults
                to None if no payload is required.
            queue_name: str. The name of the queue the task belongs to.
            task_name: str|None. Optional. The name of the task.
        """
        headers = {}
        headers['X-Appengine-QueueName'] = (
            python_utils.convert_to_bytes(queue_name))
        # An empty-string task name is coerced to None so that the header
        # value becomes 'None'.
        headers['X-Appengine-TaskName'] = (
            python_utils.convert_to_bytes(task_name or None))
        headers['X-AppEngine-Fake-Is-Admin'] = (
            python_utils.convert_to_bytes(1))
        csrf_token = self._test_base.get_new_csrf_token()
        self._test_base.post_task(url, payload, headers, csrf_token=csrf_token)

    def create_http_task(
            self, queue_name, url, payload=None, scheduled_for=None,
            task_name=None):
        """Creates a Task in the corresponding queue that is executed by the
        cloud tasks emulator.

        Args:
            queue_name: str. The name of the queue to add the task to.
            url: str. URL of the handler function.
            payload: dict(str : *). Payload to pass to the request. Defaults
                to None if no payload is required.
            scheduled_for: datetime|None. The naive datetime object for the
                time to execute the task. Ignored by this stub.
            task_name: str|None. Optional. The name of the task.
        """
        # The scheduled_for argument is ignored and forced to 0 so that tasks
        # execute immediately; honoring a delay would make tests that depend
        # on the task's side effects unreliable.
        self._client.create_task(
            queue_name, url, payload, scheduled_for=0, task_name=task_name)

    def count_jobs_in_taskqueue(self, queue_name=None):
        """Counts the tasks in one queue, or across the whole taskqueue.

        Args:
            queue_name: str|None. Name of the queue, or None to count tasks
                in every queue.

        Returns:
            int. The number of tasks found.
        """
        return self._client.get_number_of_tasks(queue_name=queue_name)

    def process_and_flush_tasks(self, queue_name=None):
        """Executes the tasks in one queue, or across the whole taskqueue.

        Args:
            queue_name: str|None. Name of the queue, or None to execute tasks
                in every queue.
        """
        self._client.process_and_flush_tasks(queue_name=queue_name)

    def get_pending_tasks(self, queue_name=None):
        """Lists the tasks in one queue, or across the whole taskqueue.

        Args:
            queue_name: str|None. Name of the queue, or None to list tasks in
                every queue.

        Returns:
            list(Task). The tasks found.
        """
        return self._client.get_tasks(queue_name=queue_name)
class MemoryCacheServicesStub(python_utils.OBJECT):
    """Stub that replaces the platform.cache services API with a plain,
    class-level dictionary shared by all instances.
    """

    # Shared backing store for the stubbed cache.
    _CACHE_DICT = {}

    def get_memory_cache_stats(self):
        """Returns a mock profile of the cache dictionary.

        The stub cannot measure peak or total memory usage, so those two
        fields are always reported as 0.

        Returns:
            MemoryCacheStats. MemoryCacheStats object containing the total
            number of keys in the cache dictionary.
        """
        return caching_domain.MemoryCacheStats(0, 0, len(self._CACHE_DICT))

    def flush_cache(self):
        """Wipes the cache dictionary clean."""
        self._CACHE_DICT.clear()

    def get_multi(self, keys):
        """Looks up a list of keys in the cache dictionary.

        Args:
            keys: list(str). A list of keys (strings) to look up.

        Returns:
            list(str). The values corresponding to each key, with None for
            keys that are absent.
        """
        assert isinstance(keys, list)
        values = []
        for key in keys:
            values.append(self._CACHE_DICT.get(key))
        return values

    def set_multi(self, key_value_mapping):
        """Sets multiple keys' values at once in the cache dictionary.

        Args:
            key_value_mapping: dict(str, str). Both the key and value are
                strings. The value can either be a primitive binary-safe
                string or the JSON-encoded string version of the object.

        Returns:
            bool. Whether the set action succeeded.
        """
        assert isinstance(key_value_mapping, dict)
        self._CACHE_DICT.update(key_value_mapping)
        return True

    def delete_multi(self, keys):
        """Deletes multiple keys in the cache dictionary.

        Args:
            keys: list(str). The keys to delete.

        Returns:
            int. Number of successfully deleted keys.
        """
        assert all(isinstance(key, python_utils.BASESTRING) for key in keys)
        deleted_count = 0
        for key in keys:
            if key in self._CACHE_DICT:
                del self._CACHE_DICT[key]
                deleted_count += 1
        return deleted_count
class TestBase(unittest.TestCase):
    """Base class for all tests."""

    maxDiff = 2500

    # A test unicode string.
    UNICODE_TEST_STRING = 'unicode ¡马!'

    def _get_unicode_test_string(self, suffix):
        """Returns a string that contains unicode characters and ends with the
        given suffix. This is used to test that functions behave correctly when
        handling strings with unicode characters.

        Args:
            suffix: str. The suffix to append to the UNICODE_TEST_STRING.

        Returns:
            str. A string that contains unicode characters and ends with the
            given suffix.
        """
        return '%s%s' % (self.UNICODE_TEST_STRING, suffix)

    def _assert_validation_error(self, item, error_substring):
        """Checks that the given item passes default validation."""
        with self.assertRaisesRegexp(utils.ValidationError, error_substring):
            item.validate()

    def log_line(self, line):
        """Print the line with a prefix that can be identified by the script
        that calls the test.
        """
        # We are using the b' prefix as all the stdouts are in bytes.
        python_utils.PRINT(
            b'%s%s' % (LOG_LINE_PREFIX, python_utils.convert_to_bytes(line)))

    def shortDescription(self):
        """Additional information logged during unit test invocation."""
        # Suppress default logging of docstrings.
        return None

    def get_updated_param_dict(
            self, param_dict, param_changes, exp_param_specs):
        """Updates a param dict using the given list of param_changes.

        Note that the list of parameter changes is ordered. Parameter changes
        later in the list may depend on parameter changes that have been set
        earlier in the same list.

        Args:
            param_dict: dict(str, *). The starting param dict. It is not
                mutated; a deep copy is updated and returned.
            param_changes: list(ParamChange). The ordered parameter changes to
                apply. Each needs a `name` attribute and a
                `get_normalized_value` method.
            exp_param_specs: dict(str, ParamSpec). The parameter specs, used
                to look up each changed parameter's obj_type.

        Returns:
            dict(str, *). A new param dict with the changes applied.

        Raises:
            Exception. A parameter named in param_changes is missing from
                exp_param_specs.
        """
        new_param_dict = copy.deepcopy(param_dict)
        for param_change in param_changes:
            try:
                obj_type = exp_param_specs[param_change.name].obj_type
            # Only a missing spec should be reported as "not found"; a bare
            # except here would also mask unrelated programming errors.
            except KeyError:
                raise Exception('Parameter %s not found' % param_change.name)
            new_param_dict[param_change.name] = (
                param_change.get_normalized_value(obj_type, new_param_dict))
        return new_param_dict

    def get_static_asset_filepath(self):
        """Returns filepath to the static files on disk ('' or 'build/')."""
        return '' if constants.DEV_MODE else os.path.join('build')

    def get_static_asset_url(self, asset_suffix):
        """Returns the relative path for the asset, appending it to the
        corresponding cache slug. asset_suffix should have a leading slash.
        """
        return '/assets%s%s' % (utils.get_asset_dir_prefix(), asset_suffix)

    @contextlib.contextmanager
    def capture_logging(self, min_level=logging.NOTSET):
        """Context manager that captures logs into a list.

        Strips whitespace from messages for convenience.

        https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging

        Args:
            min_level: int. The minimum logging level captured by the context
                manager. By default, all logging levels are captured. Values
                should be one of the following values from the logging module:
                NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL.

        Yields:
            list(str). A live-feed of the logging messages captured so-far.
        """
        captured_logs = []

        class ListStream(python_utils.OBJECT):
            """Stream-like object that appends writes to the captured logs."""

            def write(self, msg):
                """Appends stripped messages to captured logs."""
                captured_logs.append(msg.strip())

            def flush(self):
                """Does nothing."""
                pass

        list_stream_handler = logging.StreamHandler(stream=ListStream())

        logger = logging.getLogger()
        old_level = logger.level
        logger.addHandler(list_stream_handler)
        logger.setLevel(min_level)
        try:
            yield captured_logs
        finally:
            # Always restore the logger's prior state, even on error.
            logger.setLevel(old_level)
            logger.removeHandler(list_stream_handler)

    @contextlib.contextmanager
    def swap(self, obj, attr, newvalue):
        """Swap an object's attribute value within the context of a 'with'
        statement. The object can be anything that supports getattr and setattr,
        such as class instances, modules, etc.

        Example usage:

            import math
            with self.swap(math, 'sqrt', lambda x: 42):
                print math.sqrt(16.0) # prints 42
            print math.sqrt(16.0) # prints 4 as expected.

        To mock class methods, pass the function to the classmethod decorator
        first, for example:

            import types
            with self.swap(
                SomePythonClass, 'some_classmethod',
                classmethod(new_classmethod)):

        NOTE: self.swap and other context managers that are created using
        contextlib.contextmanager use generators that yield exactly once. This
        means that you can only use them once after construction, otherwise,
        the generator will immediately raise StopIteration, and contextlib will
        raise a RuntimeError.
        """
        original = getattr(obj, attr)
        setattr(obj, attr, newvalue)
        try:
            yield
        finally:
            setattr(obj, attr, original)

    @contextlib.contextmanager
    def swap_to_always_return(self, obj, attr, value=None):
        """Swap obj.attr with a function that always returns the given value."""
        def function_that_always_returns(*unused_args, **unused_kwargs):
            """Returns the input value."""
            return value
        with self.swap(obj, attr, function_that_always_returns):
            yield

    @contextlib.contextmanager
    def swap_to_always_raise(self, obj, attr, error=Exception):
        """Swap obj.attr with a function that always raises the given error."""
        def function_that_always_raises(*unused_args, **unused_kwargs):
            """Raises the input exception."""
            raise error
        with self.swap(obj, attr, function_that_always_raises):
            yield

    @contextlib.contextmanager
    def swap_with_checks(
            self, obj, attr, new_value, expected_args=None,
            expected_kwargs=None, called=True):
        """Swap an object's function value within the context of a 'with'
        statement. The object can be anything that supports getattr and setattr,
        such as class instances, modules, etc.

        Examples:
            If you want to check subprocess.Popen is invoked twice like
            `subprocess.Popen(['python'], shell=True)` and
            `subprocess.Popen(['python2'], shell=False)`, you can first define
            the mock function, then the swap, and just run the target function
            in context, as follows:

                def mock_popen(command, shell):
                    return

                popen_swap = self.swap_with_checks(
                    subprocess, 'Popen', mock_popen,
                    expected_args=[(['python'],), (['python2'],)],
                    expected_kwargs=[{'shell': True}, {'shell': False}])
                with popen_swap:
                    function_that_invokes_popen()

        Args:
            obj: *. The Python object whose attribute you want to swap.
            attr: str. The name of the function to be swapped.
            new_value: function. The new function you want to use.
            expected_args: None|list(tuple). The expected args that you want
                this function to be invoked with. When its value is None, args
                will not be checked. If the value type is list, the function
                will check whether the called args is the first element in the
                list. If matched, this tuple will be removed from the list.
            expected_kwargs: None|list(dict). The expected keyword args you want
                this function to be invoked with. Similar to expected_args.
            called: bool. Whether the function is expected to be invoked. This
                will always be checked.

        Yields:
            context. The context with function replaced.
        """
        original = getattr(obj, attr)
        # The actual error message will also include detail assert error message
        # via the `self.longMessage` below.
        msg = 'Expected checks failed when swapping out in %s.%s tests.' % (
            obj.__name__, attr)

        def wrapper(*args, **kwargs):
            """Wrapper function for the new value. This function will do the
            check before the wrapped function is invoked. After the function
            finished, the wrapper will update how many times this function is
            invoked.

            Args:
                *args: list(*). The args passed into `attr` function.
                **kwargs: dict. The key word args passed into `attr` function.

            Returns:
                *. Result of `new_value`.
            """
            wrapper.called = True
            if expected_args is not None:
                self.assertEqual(args, expected_args[0], msg=msg)
                expected_args.pop(0)
            if expected_kwargs is not None:
                self.assertEqual(kwargs, expected_kwargs[0], msg=msg)
                expected_kwargs.pop(0)
            result = new_value(*args, **kwargs)
            return result

        wrapper.called = False
        setattr(obj, attr, wrapper)
        error_occurred = False
        try:
            # This will show the detailed assert message.
            self.longMessage = True
            yield
        except Exception:
            error_occurred = True
            # Raise issues thrown by the called function or assert error.
            raise
        finally:
            setattr(obj, attr, original)
            if not error_occurred:
                self.assertEqual(wrapper.called, called, msg=msg)
                self.assertFalse(expected_args, msg=msg)
                self.assertFalse(expected_kwargs, msg=msg)
            self.longMessage = False

    def assertRaises(self, *args, **kwargs):
        raise NotImplementedError(
            'self.assertRaises should not be used in these tests. Please use '
            'self.assertRaisesRegexp instead.')

    def assertRaisesRegexp( # pylint: disable=keyword-arg-before-vararg
            self, expected_exception, expected_regexp, callable_obj=None,
            *args, **kwargs):
        if not expected_regexp:
            raise Exception(
                'Please provide a sufficiently strong regexp string to '
                'validate that the correct error is being raised.')
        return super(TestBase, self).assertRaisesRegexp(
            expected_exception, expected_regexp,
            callable_obj=callable_obj, *args, **kwargs)

    def assert_matches_regexps(self, items, regexps, full_match=False):
        """Asserts that each item matches the corresponding regexp.

        If there are any missing or extra items that do not correspond to a
        regexp element, then the assertion fails.

        Args:
            items: list(str). The string elements being matched.
            regexps: list(str|RegexObject). The patterns that each item is
                expected to match.
            full_match: bool. Whether to require items to match exactly with the
                corresponding pattern.

        Raises:
            AssertionError. At least one item does not match its corresponding
                pattern, or the number of items does not match the number of
                regexp patterns.
        """
        get_match = re.match if full_match else re.search
        # NOTE(review): re.match/re.search raise ValueError when given a
        # precompiled pattern together with a flags argument, so the
        # RegexObject case mentioned in the docstring may not actually work
        # here — confirm before relying on it.
        differences = [
            '~ [i=%d]:\t%r does not match: %r' % (i, item, regexp)
            for i, (regexp, item) in enumerate(python_utils.ZIP(regexps, items))
            if get_match(regexp, item, re.DOTALL) is None
        ]
        if len(items) < len(regexps):
            extra_regexps = regexps[len(items):]
            differences.extend(
                '- [i=%d]:\tmissing item expected to match: %r' % (i, regexp)
                for i, regexp in enumerate(extra_regexps, start=len(items)))
        if len(regexps) < len(items):
            extra_items = items[len(regexps):]
            differences.extend(
                '+ [i=%d]:\textra item %r' % (i, item)
                for i, item in enumerate(extra_items, start=len(regexps)))

        if differences:
            error_message = 'Lists differ:\n\t%s' % '\n\t'.join(differences)
            raise AssertionError(error_message)
class AppEngineTestBase(TestBase):
    """Minimal base class for tests that need Google App Engine functionality.

    This class is primarily designed for unit tests in core.platform, where we
    write adapters around Oppia's third-party dependencies. Generally, our unit
    tests depend on stub implementations of these adapters to protect them from
    platform-specific behavior. Such stubs are installed in the
    GenericTestBase.run() method.

    Most of the unit tests in our code base do, and should, inherit from
    `GenericTestBase` to stay platform-agnostic. The platform layer itself,
    however, can _not_ mock out platform-specific behavior. Those unit tests
    need to interact with a real implementation. This base class provides the
    bare-minimum functionality and stubs necessary to do so.
    """

    # Environment values that our tests depend on.
    AUTH_DOMAIN = 'example.com'
    HTTP_HOST = 'localhost'
    SERVER_NAME = 'localhost'
    SERVER_PORT = '8080'
    DEFAULT_VERSION_HOSTNAME = '%s:%s' % (HTTP_HOST, SERVER_PORT)

    def __init__(self, *args, **kwargs):
        super(AppEngineTestBase, self).__init__(*args, **kwargs)
        # Defined outside of setUp() because we access it from methods, but can
        # only install it during the run() method. Defining it in __init__
        # satisfies pylint's attribute-defined-outside-init warning.
        self._platform_taskqueue_services_stub = TaskqueueServicesStub(self)

    def setUp(self):
        """Activates a GAE testbed and the service stubs the tests rely on."""
        super(AppEngineTestBase, self).setUp()
        # The testbed must be activated before any of its service stubs can
        # be initialized.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        # Fix the host/port environment values so request handlers under test
        # see a consistent configuration.
        self.testbed.setup_env(
            overwrite=True,
            auth_domain=self.AUTH_DOMAIN, http_host=self.HTTP_HOST,
            server_name=self.SERVER_NAME, server_port=self.SERVER_PORT,
            default_version_hostname=self.DEFAULT_VERSION_HOSTNAME)

        # Google App Engine service stubs.
        self.testbed.init_app_identity_stub()
        self.testbed.init_blobstore_stub()
        self.testbed.init_files_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_search_stub()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_user_stub()

        # Use an "instantaneous" global consistency policy for the datastore
        # stub (provided by datastore_services).
        policy = (
            datastore_services.make_instantaneous_global_consistency_policy())
        self.testbed.init_datastore_v3_stub(consistency_policy=policy)

        # The root path tells the testbed where to find the queue.yaml file.
        self.testbed.init_taskqueue_stub(root_path=os.getcwd())
        self._testbed_taskqueue_stub = (
            self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME))

        # Set up apps for testing. Tasks and mail have their own WSGI apps,
        # separate from the main handler app.
        self.testapp = webtest.TestApp(main.app)
        self.taskqueue_testapp = webtest.TestApp(main_taskqueue.app)
        self.mail_testapp = webtest.TestApp(main_mail.app)

    def tearDown(self):
        """Deactivates the testbed, then runs the superclass teardown."""
        self.testbed.deactivate()
        super(AppEngineTestBase, self).tearDown()

    def run(self, result=None):
        """Run the test, collecting the result into the specified TestResult.

        Reference URL:
        https://docs.python.org/3/library/unittest.html#unittest.TestCase.run

        AppEngineTestBase's override of run() wraps super().run() in "swap"
        contexts which stub out the platform taskqueue services.

        Args:
            result: TestResult | None. Holds onto the results of each test. If
                None, a temporary result object is created (by calling the
                defaultTestResult() method) and used instead.
        """
        # Route platform taskqueue task creation through the stub for the
        # duration of the test.
        platform_taskqueue_services_swap = self.swap(
            platform_taskqueue_services, 'create_http_task',
            self._platform_taskqueue_services_stub.create_http_task)
        with platform_taskqueue_services_swap:
            super(AppEngineTestBase, self).run(result=result)

    def _get_all_queue_names(self):
        """Returns a list of all queue names."""
        return [q['name'] for q in self._testbed_taskqueue_stub.GetQueues()]

    def count_jobs_in_taskqueue(self, queue_name):
        """Returns the total number of tasks in a single queue if a queue name
        is specified or the entire taskqueue if no queue name is specified.

        Args:
            queue_name: str|None. Name of the queue. Pass in None if no specific
                queue is designated.

        Returns:
            int. The total number of tasks in a single queue or in the entire
            taskqueue.
        """
        return self._platform_taskqueue_services_stub.count_jobs_in_taskqueue(
            queue_name=queue_name)

    def process_and_flush_pending_tasks(self, queue_name=None):
        """Executes all of the tasks in a single queue if a queue name is
        specified or all of the tasks in the taskqueue if no queue name is
        specified.

        Args:
            queue_name: str|None. Name of the queue. Pass in None if no specific
                queue is designated.
        """
        self._platform_taskqueue_services_stub.process_and_flush_tasks(
            queue_name=queue_name)

    def get_pending_tasks(self, queue_name=None):
        """Returns a list of the tasks in a single queue if a queue name is
        specified or a list of all of the tasks in the taskqueue if no queue
        name is specified.

        Args:
            queue_name: str|None. Name of the queue. Pass in None if no specific
                queue is designated.

        Returns:
            list(Task). List of tasks in a single queue or in the entire
            taskqueue.
        """
        return self._platform_taskqueue_services_stub.get_pending_tasks(
            queue_name=queue_name)

    def count_jobs_in_mapreduce_taskqueue(self, queue_name):
        """Counts the jobs in the given MapReduce taskqueue."""
        return len(self.get_pending_mapreduce_tasks(queue_name=queue_name))

    def get_pending_mapreduce_tasks(self, queue_name=None):
        """Returns the jobs in the given MapReduce taskqueue. If queue_name is
        None, defaults to returning the jobs in all available queues.
        """
        queue_names = None if queue_name is None else [queue_name]
        return self._testbed_taskqueue_stub.get_filtered_tasks(
            queue_names=queue_names)

    def _execute_mapreduce_tasks(self, tasks):
        """Execute MapReduce queued tasks.

        Args:
            tasks: list(google.appengine.api.taskqueue.taskqueue.Task). The
                queued tasks.
        """
        for task in tasks:
            # Deferred-library tasks are run directly through deferred.run().
            if task.url == '/_ah/queue/deferred':
                deferred.run(task.payload)
            else:
                # All other tasks will be for MapReduce or taskqueue.
                params = task.payload or ''
                headers = {
                    'Content-Length': python_utils.convert_to_bytes(len(params))
                }
                headers.update(
                    (key, python_utils.convert_to_bytes(val))
                    for key, val in task.headers.items())

                # Task URLs under /task go through the taskqueue app; the
                # rest go through the main test app.
                app = (
                    self.taskqueue_testapp if task.url.startswith('/task') else
                    self.testapp)
                response = app.post(
                    task.url, params=params, headers=headers,
                    expect_errors=True)
                if response.status_code != 200:
                    raise RuntimeError('MapReduce task failed: %r' % task)

    def process_and_flush_pending_mapreduce_tasks(self, queue_name=None):
        """Runs and flushes pending MapReduce tasks. If queue_name is None, does
        so for all queues; otherwise, this only runs and flushes tasks for the
        specified queue.

        For more information on taskqueue_stub, see:
        https://code.google.com/p/googleappengine/source/browse/trunk/python/google/appengine/api/taskqueue/taskqueue_stub.py
        """
        queue_names = (
            self._get_all_queue_names() if queue_name is None else [queue_name])

        get_enqueued_tasks = lambda: list(
            self._testbed_taskqueue_stub.get_filtered_tasks(
                queue_names=queue_names))

        # Loops until get_enqueued_tasks() returns an empty list. Executed
        # tasks may enqueue new ones, so the loop repeats until drained.
        for tasks in iter(get_enqueued_tasks, []):
            for queue in queue_names:
                self._testbed_taskqueue_stub.FlushQueue(queue)
            self._execute_mapreduce_tasks(tasks)

    def run_but_do_not_flush_pending_mapreduce_tasks(self):
        """"Runs, but does not flush, the pending MapReduce tasks."""
        queue_names = self._get_all_queue_names()

        tasks = self._testbed_taskqueue_stub.get_filtered_tasks(
            queue_names=queue_names)
        for queue in queue_names:
            self._testbed_taskqueue_stub.FlushQueue(queue)

        self._execute_mapreduce_tasks(tasks)
class GenericTestBase(AppEngineTestBase):
"""Base test class with common/generic helper methods.
Unless a class is testing for "platform"-specific behavior (e.g., testing
third-party library code or database model implementations), always inherit
from this base class. Otherwise, inherit from unittest.TestCase (preferred)
or AppEngineTestBase if Google App Engine services/behavior is needed.
TODO(#12135): Split this enormous test base into smaller, focused pieces.
"""
# NOTE: For tests that do not/can not use the default super-admin, authors
# can override the following class-level constant.
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = True
# This is the value that gets returned by default when
# app_identity.get_application_id() is called during tests.
EXPECTED_TEST_APP_ID = 'dummy-cloudsdk-project-id'
SUPER_ADMIN_EMAIL = 'tmpsuperadmin@example.com'
SUPER_ADMIN_USERNAME = 'tmpsuperadm1n'
# Dummy strings representing user attributes. Note that it is up to the
# individual test to actually register these users as editors, admins, etc.
ADMIN_EMAIL = 'admin@example.com'
# Usernames containing the string 'admin' are reserved, so we use 'adm'
# instead.
ADMIN_USERNAME = 'adm'
MODERATOR_EMAIL = 'moderator@example.com'
MODERATOR_USERNAME = 'moderator'
OWNER_EMAIL = 'owner@example.com'
OWNER_USERNAME = 'owner'
EDITOR_EMAIL = 'editor@example.com'
EDITOR_USERNAME = 'editor'
TOPIC_MANAGER_EMAIL = 'topicmanager@example.com'
TOPIC_MANAGER_USERNAME = 'topicmanager'
VOICE_ARTIST_EMAIL = 'voiceartist@example.com'
VOICE_ARTIST_USERNAME = 'voiceartist'
VIEWER_EMAIL = 'viewer@example.com'
VIEWER_USERNAME = 'viewer'
NEW_USER_EMAIL = 'new.user@example.com'
NEW_USER_USERNAME = 'newuser'
DEFAULT_END_STATE_NAME = 'End'
PSEUDONYMOUS_ID = 'pid_%s' % ('a' * 32)
VERSION_0_STATES_DICT = {
feconf.DEFAULT_INIT_STATE_NAME: {
'content': [{'type': 'text', 'value': ''}],
'param_changes': [],
'interaction': {
'customization_args': {},
'id': 'Continue',
'handlers': [{
'name': 'submit',
'rule_specs': [{
'dest': 'END',
'feedback': [],
'param_changes': [],
'definition': {'rule_type': 'default'},
}],
}],
},
},
}
VERSION_27_STATE_DICT = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
},
'interaction': {
'solution': {
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>Solution explanation</p>',
},
'answer_is_exclusive': False,
},
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': '',
},
'dest': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': True,
},
'customization_args': {
'rows': {'value': 1},
'placeholder': {'value': 'Enter text here'},
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hint 1</p>',
},
}],
},
'classifier_model_id': None,
}
VERSION_21_STATE_DICT = {
'END': {
'classifier_model_id': None,
'content': {
'content_id': 'content',
'html': 'Congratulations, you have finished!',
},
'content_ids_to_audio_translations': {'content': {}},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {
'recommendedExplorationIds': {'value': []},
},
'default_outcome': None,
'hints': [],
'id': 'EndExploration',
'solution': None,
},
'param_changes': [],
},
'Introduction': {
'classifier_model_id': None,
'content': {'content_id': 'content', 'html': ''},
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'feedback_1': {},
},
'interaction': {
'answer_groups': [{
'outcome': {
'dest': 'END',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Correct!</p>',
},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'rule_specs': [{
'inputs': {'x': 'InputString'},
'rule_type': 'Equals',
}],
'tagged_misconception_id': None,
'training_data': ['answer1', 'answer2', 'answer3'],
}],
'confirmed_unclassified_answers': [],
'customization_args': {
'placeholder': {'value': ''},
'rows': {'value': 1},
},
'default_outcome': {
'dest': 'Introduction',
'feedback': {'content_id': 'default_outcome', 'html': ''},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'hints': [],
'id': 'TextInput',
'solution': None,
},
'param_changes': [],
},
}
VERSION_1_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_2_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_3_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_4_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math math_content-with-value="{'
'&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '
'&quot;svg_filename&quot;: &quot;&quot;'
'}">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_1_SUBTOPIC_DICT = {
'skill_ids': ['skill_1'],
'id': 1,
'title': 'A subtitle',
}
# Dictionary-like data structures within the sample YAML must be formatted
# alphabetically so that they compare equal, as strings, with the YAML
# produced by the generation tests. The indentation is also important, since
# indentation is used to define nesting (just like in Python).
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
def run(self, result=None):
    """Run the test, collecting the result into the specified TestResult.

    Reference URL:
    https://docs.python.org/3/library/unittest.html#unittest.TestCase.run

    GenericTestBase's override of run() wraps super().run() in swap
    contexts to mock out the cache and taskqueue services.

    Args:
        result: TestResult | None. Holds onto the results of each test. If
            None, a temporary result object is created (by calling the
            defaultTestResult() method) and used instead.
    """
    memory_cache_services_stub = MemoryCacheServicesStub()
    memory_cache_services_stub.flush_cache()
    es_stub = ElasticSearchStub()
    es_stub.reset()

    # Each entry is (target object, attribute name, replacement callable).
    # The swaps are installed in this exact order inside a single
    # ExitStack, mirroring the original one-call-per-swap layout.
    swap_specs = [
        (elastic_search_services.ES.indices, 'create',
         es_stub.mock_create_index),
        (elastic_search_services.ES, 'index', es_stub.mock_index),
        (elastic_search_services.ES, 'exists', es_stub.mock_exists),
        (elastic_search_services.ES, 'delete', es_stub.mock_delete),
        (elastic_search_services.ES, 'delete_by_query',
         es_stub.mock_delete_by_query),
        (elastic_search_services.ES, 'search', es_stub.mock_search),
        (memory_cache_services, 'flush_cache',
         memory_cache_services_stub.flush_cache),
        (memory_cache_services, 'get_multi',
         memory_cache_services_stub.get_multi),
        (memory_cache_services, 'set_multi',
         memory_cache_services_stub.set_multi),
        (memory_cache_services, 'get_memory_cache_stats',
         memory_cache_services_stub.get_memory_cache_stats),
        (memory_cache_services, 'delete_multi',
         memory_cache_services_stub.delete_multi),
    ]

    with contextlib2.ExitStack() as stack:
        # install_stub() returns the matching uninstall callable, which the
        # stack invokes last during unwinding.
        stack.callback(AuthServicesStub.install_stub(self))
        for target, attr_name, replacement in swap_specs:
            stack.enter_context(self.swap(target, attr_name, replacement))
        super(GenericTestBase, self).run(result=result)
def setUp(self):
    """Sets up the test fixture; optionally signs up the super admin.

    Suites that do not want the default super-admin account can set
    AUTO_CREATE_DEFAULT_SUPERADMIN_USER to False on the test class.
    """
    super(GenericTestBase, self).setUp()
    if self.AUTO_CREATE_DEFAULT_SUPERADMIN_USER:
        self.signup_superadmin_user()
def tearDown(self):
    """Deletes every datastore entity so each test starts from a clean slate."""
    # keys_only avoids fetching full entities just to delete them.
    datastore_services.delete_multi(
        datastore_services.query_everything().iter(keys_only=True))
    super(GenericTestBase, self).tearDown()
def login(self, email, is_super_admin=False):
    """Sets the environment variables to simulate a login.

    Args:
        email: str. The email of the user who is to be logged in.
        is_super_admin: bool. Whether the user is a super admin.
    """
    # The testbed expects the admin flag as the strings '1'/'0'.
    admin_flag = '1' if is_super_admin else '0'
    auth_id = self.get_auth_id_from_email(email)
    self.testbed.setup_env(
        overwrite=True, user_email=email, user_id=auth_id,
        user_is_admin=admin_flag)
def logout(self):
    """Simulates a logout by resetting the environment variables."""
    logged_out_env = {'user_email': '', 'user_id': '', 'user_is_admin': '0'}
    self.testbed.setup_env(overwrite=True, **logged_out_env)
@contextlib.contextmanager
def mock_datetime_utcnow(self, mocked_datetime):
    """Mocks response from datetime.datetime.utcnow method.

    Example usage:
        import datetime
        mocked_datetime_utcnow = (
            datetime.datetime.utcnow() - datetime.timedelta(days=1))
        with self.mock_datetime_utcnow(mocked_datetime_utcnow):
            print datetime.datetime.utcnow() # prints time reduced by 1 day
        print datetime.datetime.utcnow() # prints current time.

    Args:
        mocked_datetime: datetime.datetime. The datetime which will be used
            instead of the current UTC datetime.

    Yields:
        None. Empty yield statement.
    """
    # NOTE(review): this delegates entirely to the datastore services
    # helper — presumably that helper patches utcnow() globally (not just
    # for datastore writes, as its name suggests); confirm against its
    # implementation.
    with datastore_services.mock_datetime_for_datastore(mocked_datetime):
        yield
@contextlib.contextmanager
def login_context(self, email, is_super_admin=False):
    """Log in with the given email under the context of a 'with' statement.

    Args:
        email: str. An email associated with a user account.
        is_super_admin: bool. Whether the user is a super admin.

    Yields:
        str. The id of the user associated with the given email, who is now
        'logged in'.
    """
    self.login(email, is_super_admin=is_super_admin)
    try:
        yield self.get_user_id_from_email(email)
    finally:
        # Always log out, even if the body of the 'with' block raises.
        self.logout()
@contextlib.contextmanager
def super_admin_context(self):
    """Log in as a global admin under the context of a 'with' statement.

    Yields:
        str. The id of the user associated with the given email, who is now
        'logged in'.
    """
    login_ctx = self.login_context(self.SUPER_ADMIN_EMAIL, is_super_admin=True)
    with login_ctx as user_id:
        yield user_id
def signup(self, email, username):
    """Complete the signup process for the user with the given username.

    Args:
        email: str. Email of the given user.
        username: str. Username of the given user.
    """
    user_services.create_new_user(self.get_auth_id_from_email(email), email)
    with self.login_context(email), requests_mock.Mocker() as m:
        # We mock out all HTTP requests while trying to signup to avoid
        # calling out to real backend services.
        m.request(requests_mock.ANY, requests_mock.ANY)

        # NOTE(review): the GET of the signup page before the POST appears
        # to be required by the signup flow (presumably it establishes
        # session state) — confirm before reordering.
        response = self.get_html_response(feconf.SIGNUP_URL)
        self.assertEqual(response.status_int, 200)

        response = self.testapp.post(feconf.SIGNUP_DATA_URL, params={
            'csrf_token': self.get_new_csrf_token(),
            'payload': json.dumps(
                {'username': username, 'agreed_to_terms': True}),
        })
        self.assertEqual(response.status_int, 200)
def signup_superadmin_user(self):
    """Signs up a superadmin user. Must be called at the end of setUp()."""
    # Uses the class-level super-admin credentials shared by all suites.
    self.signup(self.SUPER_ADMIN_EMAIL, self.SUPER_ADMIN_USERNAME)
def set_config_property(self, config_obj, new_config_value):
    """Sets a given configuration object's value to the new value specified
    using a POST request.

    Args:
        config_obj: *. The config property to update; any object exposing a
            'name' attribute that the admin handler recognizes.
        new_config_value: *. The new value to assign to the property.
    """
    with self.super_admin_context():
        self.post_json('/adminhandler', {
            'action': 'save_config_properties',
            'new_config_property_values': {
                config_obj.name: new_config_value,
            },
        }, csrf_token=self.get_new_csrf_token())
def set_user_role(self, username, user_role):
    """Sets the given role for this user.

    Args:
        username: str. Username of the given user.
        user_role: str. Role of the given user.
    """
    role_change_payload = {'username': username, 'role': user_role}
    with self.super_admin_context():
        self.post_json(
            '/adminrolehandler', role_change_payload,
            csrf_token=self.get_new_csrf_token())
def set_admins(self, admin_usernames):
    """Grants the ADMIN role to each of the given users.

    Args:
        admin_usernames: list(str). List of usernames.
    """
    for admin_username in admin_usernames:
        self.set_user_role(admin_username, feconf.ROLE_ID_ADMIN)
def set_topic_managers(self, topic_manager_usernames):
    """Grants the TOPIC_MANAGER role to each of the given users.

    Args:
        topic_manager_usernames: list(str). List of usernames.
    """
    for manager_username in topic_manager_usernames:
        self.set_user_role(manager_username, feconf.ROLE_ID_TOPIC_MANAGER)
def set_moderators(self, moderator_usernames):
    """Grants the MODERATOR role to each of the given users.

    Args:
        moderator_usernames: list(str). List of usernames.
    """
    for moderator_username in moderator_usernames:
        self.set_user_role(moderator_username, feconf.ROLE_ID_MODERATOR)
def set_banned_users(self, banned_usernames):
    """Assigns the BANNED_USER role to each of the given users.

    Args:
        banned_usernames: list(str). List of usernames.
    """
    for banned_username in banned_usernames:
        self.set_user_role(banned_username, feconf.ROLE_ID_BANNED_USER)
def set_collection_editors(self, collection_editor_usernames):
    """Grants the COLLECTION_EDITOR role to each of the given users.

    Args:
        collection_editor_usernames: list(str). List of usernames.
    """
    for editor_username in collection_editor_usernames:
        self.set_user_role(editor_username, feconf.ROLE_ID_COLLECTION_EDITOR)
def get_user_id_from_email(self, email):
    """Gets the user ID corresponding to the given email.

    Args:
        email: str. A valid email stored in the App Engine database.

    Returns:
        str|None. ID of the user possessing the given email, or None if
        the user does not exist.
    """
    auth_id = self.get_auth_id_from_email(email)
    settings = user_services.get_user_settings_by_auth_id(auth_id)
    if not settings:
        # Preserve the original `x and x.user_id` semantics: a missing
        # settings object is returned as-is (i.e. None).
        return settings
    return settings.user_id
@classmethod
def get_auth_id_from_email(cls, email):
    """Returns a mock auth ID corresponding to the given email.

    This method can use any algorithm to produce results as long as, during
    the runtime of each test case/method, it is:
        1. Pure (same input always returns the same output).
        2. One-to-one (no two distinct inputs return the same output).
        3. An integer byte-string (integers are always valid in auth IDs).

    Args:
        email: str. The email address of the user.

    Returns:
        bytes. The mock auth ID of a user possessing the given email.
    """
    # hash() is not truly one-to-one, but collisions are vanishingly rare
    # for test data. abs() keeps the value positive, since negative values
    # would not be valid auth IDs.
    positive_hash = abs(hash(email))
    return python_utils.convert_to_bytes(positive_hash)
def _get_response(
        self, url, expected_content_type, params=None,
        expected_status_int=200):
    """Get a response, transformed to a Python object.

    Args:
        url: str. The URL to fetch the response.
        expected_content_type: str. The content type to expect.
        params: dict. A dictionary that will be encoded into a query string.
        expected_status_int: int. The integer status code to expect. Will be
            200 if not specified.

    Returns:
        webtest.TestResponse. The test response.
    """
    if params is not None:
        self.assertIsInstance(params, dict)

    expect_errors = expected_status_int >= 400

    # Fetch templates from the source directory instead of webpack_bundles:
    # webpack_bundles only exists after a webpack compilation, which is not
    # performed during backend tests.
    with self.swap(base, 'load_template', mock_load_template):
        response = self.testapp.get(
            url, params=params, expect_errors=expect_errors,
            status=expected_status_int)

    if expect_errors:
        self.assertGreaterEqual(response.status_int, 400)
    else:
        self.assertTrue(200 <= response.status_int < 400)

    # Testapp's 'status' argument is only enforced when expect_errors is
    # False, so the status must also be checked explicitly here.
    #
    # Reference URL:
    # https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
    self.assertEqual(response.status_int, expected_status_int)
    self.assertEqual(response.content_type, expected_content_type)
    return response
def get_html_response(self, url, params=None, expected_status_int=200):
    """Get a HTML response, transformed to a Python object.

    Args:
        url: str. The URL to fetch the response.
        params: dict. A dictionary that will be encoded into a query string.
        expected_status_int: int. The integer status code to expect. Will
            be 200 if not specified.

    Returns:
        webtest.TestResponse. The test response.
    """
    # Thin wrapper around _get_response with the HTML content type fixed.
    return self._get_response(
        url, 'text/html', params=params,
        expected_status_int=expected_status_int)
def get_custom_response(
        self, url, expected_content_type, params=None,
        expected_status_int=200):
    """Get a response other than HTML or JSON as a Python object.

    Args:
        url: str. The URL to fetch the response.
        expected_content_type: str. The content type to expect.
        params: dict. A dictionary that will be encoded into a query string.
        expected_status_int: int. The integer status code to expect. Will be
            200 if not specified.

    Returns:
        webtest.TestResponse. The test response.
    """
    # HTML and JSON have dedicated helpers; reject them here so callers use
    # the right method.
    handled_elsewhere = ('text/html', 'application/json')
    self.assertNotIn(expected_content_type, handled_elsewhere)

    return self._get_response(
        url, expected_content_type, params=params,
        expected_status_int=expected_status_int)
def get_response_without_checking_for_errors(
        self, url, expected_status_int_list, params=None):
    """Get a response, transformed to a Python object and checks for a list
    of status codes.

    Args:
        url: str. The URL to fetch the response.
        expected_status_int_list: list(int). A list of integer status code
            to expect.
        params: dict. A dictionary that will be encoded into a query string.

    Returns:
        webtest.TestResponse. The test response.
    """
    if params is not None:
        self.assertIsInstance(
            params, dict,
            msg='Expected params to be a dict, received %s' % params)

    # Fetch templates from the source directory instead of webpack_bundles:
    # webpack_bundles only exists after a webpack compilation, which is not
    # performed during backend tests.
    with self.swap(base, 'load_template', mock_load_template):
        response = self.testapp.get(url, params=params, expect_errors=True)

    self.assertIn(response.status_int, expected_status_int_list)
    return response
def _parse_json_response(self, json_response, expect_errors):
    """Convert a JSON server response to an object (such as a dict)."""
    if expect_errors:
        self.assertGreaterEqual(json_response.status_int, 400)
    else:
        self.assertTrue(200 <= json_response.status_int < 400)

    self.assertEqual(json_response.content_type, 'application/json')
    self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))

    # Strip the anti-XSSI prefix before handing the body to the JSON parser.
    raw_payload = json_response.body[len(feconf.XSSI_PREFIX):]
    return json.loads(raw_payload)
def get_json(self, url, params=None, expected_status_int=200):
    """Get a JSON response, transformed to a Python object.

    Args:
        url: str. The URL to fetch the response.
        params: dict. A dictionary that will be encoded into a query string.
        expected_status_int: int. The integer status code to expect. Will be
            200 if not specified.

    Returns:
        *. The JSON-decoded response body.
    """
    if params is not None:
        self.assertIsInstance(params, dict)
    expect_errors = expected_status_int >= 400
    json_response = self.testapp.get(
        url, params=params, expect_errors=expect_errors,
        status=expected_status_int)
    # Testapp takes in a status parameter which is the expected status of
    # the response. However this expected status is verified only when
    # expect_errors=False. For other situations we need to explicitly check
    # the status.
    #
    # Reference URL:
    # https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
    self.assertEqual(json_response.status_int, expected_status_int)
    return self._parse_json_response(json_response, expect_errors)
def post_json(
        self, url, payload, csrf_token=None, expected_status_int=200,
        upload_files=None):
    """Post an object to the server by JSON; return the received object.

    Args:
        url: str. The URL to send the POST request to.
        payload: dict. The JSON-serializable body of the request.
        csrf_token: str|None. If provided, sent alongside the payload.
        expected_status_int: int. The integer status code to expect. Will be
            200 if not specified.
        upload_files: list(tuple)|None. Files to upload; see
            _send_post_request for the accepted tuple shapes.

    Returns:
        *. The JSON-decoded response body.
    """
    data = {'payload': json.dumps(payload)}
    if csrf_token:
        data['csrf_token'] = csrf_token
    expect_errors = expected_status_int >= 400
    json_response = self._send_post_request(
        self.testapp, url, data, expect_errors,
        expected_status_int=expected_status_int, upload_files=upload_files)
    # Testapp takes in a status parameter which is the expected status of
    # the response. However this expected status is verified only when
    # expect_errors=False. For other situations we need to explicitly check
    # the status.
    #
    # Reference URL:
    # https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
    self.assertEqual(json_response.status_int, expected_status_int)
    return self._parse_json_response(json_response, expect_errors)
def delete_json(self, url, params='', expected_status_int=200):
    """Delete object on the server using a JSON call.

    Args:
        url: str. The URL of the resource to delete.
        params: dict|str. A dictionary that will be encoded into a query
            string; defaults to the empty string (i.e. no params).
        expected_status_int: int. The integer status code to expect. Will be
            200 if not specified.

    Returns:
        *. The JSON-decoded response body.
    """
    if params:
        self.assertIsInstance(
            params, dict,
            msg='Expected params to be a dict, received %s' % params)
    expect_errors = expected_status_int >= 400
    json_response = self.testapp.delete(
        url, params=params, expect_errors=expect_errors,
        status=expected_status_int)
    # Testapp takes in a status parameter which is the expected status of
    # the response. However this expected status is verified only when
    # expect_errors=False. For other situations we need to explicitly check
    # the status.
    #
    # Reference URL:
    # https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
    self.assertEqual(json_response.status_int, expected_status_int)
    return self._parse_json_response(json_response, expect_errors)
def _send_post_request(
        self, app, url, data, expect_errors, expected_status_int=200,
        upload_files=None, headers=None):
    """Sends a post request with the data provided to the url specified.

    Args:
        app: TestApp. The WSGI application which receives the request and
            produces response.
        url: str. The URL to send the POST request to.
        data: *. To be put in the body of the request. If params is an
            iterator, it will be urlencoded. If it is a string, it will not
            be encoded, but placed in the body directly. Can be a
            collections.OrderedDict with webtest.forms.Upload fields
            included.
        expect_errors: bool. Whether errors are expected.
        expected_status_int: int. The expected status code.
        upload_files: list(tuple). List of
            (fieldname, filename, file_content) tuples. Can also provide
            just (fieldname, filename) to have the file contents will be
            read from disk.
        headers: dict(str, *). Extra headers to send.

    Returns:
        webtest.TestResponse. The response of the POST request.
    """
    # Convert the files to bytes.
    # NOTE(review): webtest appears to require byte strings in the upload
    # tuples; each element (fieldname, filename, content) is converted.
    if upload_files is not None:
        upload_files = tuple(
            tuple(python_utils.convert_to_bytes(f) for f in upload_file)
            for upload_file in upload_files)

    return app.post(
        url, params=data, headers=headers, status=expected_status_int,
        upload_files=upload_files, expect_errors=expect_errors)
def post_email(
        self, recipient_email, sender_email, subject, body, html_body=None,
        expect_errors=False, expected_status_int=200):
    """Post an email from the sender to the recipient.

    Args:
        recipient_email: str. The email of the recipient.
        sender_email: str. The email of the sender.
        subject: str. The subject of the email.
        body: str. The body of the email.
        html_body: str. The HTML body of the email.
        expect_errors: bool. Whether errors are expected.
        expected_status_int: int. The expected status code of the JSON
            response.

    Returns:
        json. A JSON response generated by _send_post_request function.
    """
    # Build the message, then POST its MIME serialization to the
    # '/_ah/mail/...' inbound-mail handler route, simulating App Engine's
    # incoming email delivery.
    email = mail.EmailMessage(
        sender=sender_email, to=recipient_email, subject=subject, body=body)
    if html_body is not None:
        email.html = html_body

    mime_email = email.to_mime_message()
    headers = {
        'Content-Type': mime_email.get_content_type(),
    }
    data = mime_email.as_string()
    incoming_email_url = '/_ah/mail/%s' % recipient_email

    return self._send_post_request(
        self.mail_testapp, incoming_email_url, data, expect_errors,
        headers=headers, expected_status_int=expected_status_int)
def post_task(
        self, url, payload, headers, csrf_token=None, expect_errors=False,
        expected_status_int=200):
    """Posts an object to the server by JSON with the specific headers
    specified; return the received object.

    Args:
        url: str. The URL to send the POST request to.
        payload: dict. The JSON-serializable body of the request. NOTE:
            when csrf_token is provided, this dict is mutated in place.
        headers: dict(str, *). Extra headers to send.
        csrf_token: str|None. If provided, added to the payload under the
            'csrf_token' key.
        expect_errors: bool. Whether errors are expected.
        expected_status_int: int. The expected status code.

    Returns:
        webtest.TestResponse. The response of the POST request.
    """
    if csrf_token:
        payload['csrf_token'] = csrf_token
    return self.taskqueue_testapp.post(
        url, params=json.dumps(payload), headers=headers,
        status=expected_status_int, expect_errors=expect_errors,
        content_type='application/json')
def put_json(self, url, payload, csrf_token=None, expected_status_int=200):
    """PUT an object to the server with JSON and return the response.

    Args:
        url: str. The URL to send the PUT request to.
        payload: dict. The JSON-serializable body of the request.
        csrf_token: str|None. If provided, sent alongside the payload.
        expected_status_int: int. The integer status code to expect. Will be
            200 if not specified.

    Returns:
        *. The JSON-decoded response body.
    """
    params = {'payload': json.dumps(payload)}
    if csrf_token:
        params['csrf_token'] = csrf_token
    expect_errors = expected_status_int >= 400
    # Consistency fix: pass status= to webtest, matching get_json,
    # post_json and delete_json, so the expected status is enforced on the
    # non-error path too.
    json_response = self.testapp.put(
        url, params=params, expect_errors=expect_errors,
        status=expected_status_int)
    # Testapp takes in a status parameter which is the expected status of
    # the response. However this expected status is verified only when
    # expect_errors=False. For other situations we need to explicitly check
    # the status.
    #
    # Reference URL:
    # https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
    self.assertEqual(json_response.status_int, expected_status_int)
    return self._parse_json_response(json_response, expect_errors)
def get_new_csrf_token(self):
    """Generates CSRF token for test."""
    # The handler returns a JSON payload of the form {'token': ...}.
    return self.get_json('/csrfhandler')['token']
def save_new_default_exploration(
        self, exploration_id, owner_id, title='A title'):
    """Saves a new default exploration written by owner_id.

    Args:
        exploration_id: str. The id of the new validated exploration.
        owner_id: str. The user_id of the creator of the exploration.
        title: str. The title of the exploration.

    Returns:
        Exploration. The exploration domain object.
    """
    new_exploration = exp_domain.Exploration.create_default_exploration(
        exploration_id, title=title, category='Algebra')
    exp_services.save_new_exploration(owner_id, new_exploration)
    return new_exploration
def set_interaction_for_state(self, state, interaction_id):
    """Sets the interaction_id, sets the fully populated default interaction
    customization arguments, and increments next_content_id_index as needed.

    Args:
        state: State. The state domain object to set the interaction for.
        interaction_id: str. The interaction id to set. Also sets the
            default customization args for the given interaction id.
    """
    # We wrap next_content_id_index in a dict so that modifying it in the
    # inner function modifies the value.
    next_content_id_index_dict = {'value': state.next_content_id_index}

    def traverse_schema_and_assign_content_ids(value, schema, contentId):
        """Generates content_id from recursively traversing the schema, and
        assigning to the current value.

        Args:
            value: *. The current traversed value in customization
                arguments.
            schema: dict. The current traversed schema.
            contentId: str. The content_id generated so far.
        """
        is_subtitled_html_spec = (
            schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
            schema['obj_type'] ==
            schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML)
        is_subtitled_unicode_spec = (
            schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
            schema['obj_type'] ==
            schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE)

        if is_subtitled_html_spec or is_subtitled_unicode_spec:
            value['content_id'] = '%s_%i' % (
                contentId, next_content_id_index_dict['value'])
            next_content_id_index_dict['value'] += 1
        elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST:
            for x in value:
                traverse_schema_and_assign_content_ids(
                    x, schema['items'], contentId)
        elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT:
            # Bug fix: this branch previously read `x[schema_property.name]`
            # — `x` is undefined in this scope (a leftover from the list
            # branch), so any dict-typed schema raised a NameError. It also
            # mixed attribute access (schema_property.name) with the dict
            # access used on the same object one line later
            # (schema_property['schema']). Both lookups now use dict access,
            # consistent with the existing ['schema'] lookup.
            # TODO(review): confirm 'properties' entries are plain dicts
            # with 'name'/'schema' keys in schema_utils.
            for schema_property in schema['properties']:
                traverse_schema_and_assign_content_ids(
                    value[schema_property['name']],
                    schema_property['schema'],
                    '%s_%s' % (contentId, schema_property['name']))

    interaction = (
        interaction_registry.Registry.get_interaction_by_id(interaction_id))
    ca_specs = interaction.customization_arg_specs
    customization_args = {}

    for ca_spec in ca_specs:
        ca_name = ca_spec.name
        ca_value = ca_spec.default_value
        traverse_schema_and_assign_content_ids(
            ca_value, ca_spec.schema, 'ca_%s' % ca_name)
        customization_args[ca_name] = {'value': ca_value}

    state.update_interaction_id(interaction_id)
    state.update_interaction_customization_args(customization_args)
    state.update_next_content_id_index(next_content_id_index_dict['value'])
def save_new_valid_exploration(
        self, exploration_id, owner_id, title='A title',
        category='A category', objective='An objective',
        language_code=constants.DEFAULT_LANGUAGE_CODE, end_state_name=None,
        interaction_id='TextInput', correctness_feedback_enabled=False):
    """Saves a new strictly-validated exploration.

    Args:
        exploration_id: str. The id of the new validated exploration.
        owner_id: str. The user_id of the creator of the exploration.
        title: str. The title of the exploration.
        category: str. The category this exploration belongs to.
        objective: str. The objective of this exploration.
        language_code: str. The language_code of this exploration.
        end_state_name: str. The name of the end state for the exploration.
        interaction_id: str. The id of the interaction.
        correctness_feedback_enabled: bool. Whether correctness feedback is
            enabled for the exploration.

    Returns:
        Exploration. The exploration domain object.
    """
    exploration = exp_domain.Exploration.create_default_exploration(
        exploration_id, title=title, category=category,
        language_code=language_code)
    self.set_interaction_for_state(
        exploration.states[exploration.init_state_name], interaction_id)

    exploration.objective = objective
    exploration.correctness_feedback_enabled = correctness_feedback_enabled

    # If an end state name is provided, add terminal node with that name.
    if end_state_name is not None:
        exploration.add_states([end_state_name])
        end_state = exploration.states[end_state_name]
        self.set_interaction_for_state(end_state, 'EndExploration')
        # EndExploration states have no default outcome (they are terminal).
        end_state.update_interaction_default_outcome(None)

        # Link first state to ending state (to maintain validity).
        init_state = exploration.states[exploration.init_state_name]
        init_interaction = init_state.interaction
        init_interaction.default_outcome.dest = end_state_name
        if correctness_feedback_enabled:
            # NOTE(review): presumably required so the exploration passes
            # strict validation when correctness feedback is on — confirm.
            init_interaction.default_outcome.labelled_as_correct = True

    exp_services.save_new_exploration(owner_id, exploration)
    return exploration
def save_new_linear_exp_with_state_names_and_interactions(
        self, exploration_id, owner_id, state_names, interaction_ids,
        title='A title', category='A category', objective='An objective',
        language_code=constants.DEFAULT_LANGUAGE_CODE):
    """Saves a new strictly-validated exploration with a sequence of states.

    Args:
        exploration_id: str. The id of the new validated exploration.
        owner_id: str. The user_id of the creator of the exploration.
        state_names: list(str). The names of states to be linked
            sequentially in the exploration. Must be a non-empty list and
            contain no duplicates.
        interaction_ids: list(str). The names of the interaction ids to be
            assigned to each state. Values will be cycled, so it doesn't
            need to be the same size as state_names, but it must be
            non-empty.
        title: str. The title of the exploration.
        category: str. The category this exploration belongs to.
        objective: str. The objective of this exploration.
        language_code: str. The language_code of this exploration.

    Returns:
        Exploration. The exploration domain object.

    Raises:
        ValueError. If state_names or interaction_ids is empty.
    """
    if not state_names:
        raise ValueError('must provide at least one state name')
    if not interaction_ids:
        raise ValueError('must provide at least one interaction type')
    # Cycle so a short interaction list covers any number of states.
    interaction_ids = itertools.cycle(interaction_ids)

    exploration = exp_domain.Exploration.create_default_exploration(
        exploration_id, title=title, init_state_name=state_names[0],
        category=category, objective=objective, language_code=language_code)
    exploration.add_states(state_names[1:])
    # Chain each state's default outcome to the next state in the list.
    for from_state_name, dest_state_name in (
            python_utils.ZIP(state_names[:-1], state_names[1:])):
        from_state = exploration.states[from_state_name]
        self.set_interaction_for_state(
            from_state, python_utils.NEXT(interaction_ids))
        from_state.interaction.default_outcome.dest = dest_state_name
    # The final state is terminal: EndExploration with no default outcome.
    end_state = exploration.states[state_names[-1]]
    self.set_interaction_for_state(end_state, 'EndExploration')
    end_state.update_interaction_default_outcome(None)

    exp_services.save_new_exploration(owner_id, exploration)
    return exploration
def save_new_exp_with_states_schema_v0(self, exp_id, user_id, title):
    """Saves a new default exploration with a default version 0 states dict.

    This function should only be used for creating explorations in tests
    involving migration of datastore explorations that use an old states
    schema version.

    Note that it makes an explicit commit to the datastore instead of using
    the usual functions for updating and creating explorations. This is
    because the latter approach would result in an exploration with the
    *current* states schema version.

    Args:
        exp_id: str. The exploration ID.
        user_id: str. The user_id of the creator.
        title: str. The title of the exploration.
    """
    exp_model = exp_models.ExplorationModel(
        id=exp_id, category='category', title=title,
        objective='Old objective', language_code='en', tags=[], blurb='',
        author_notes='', states_schema_version=0,
        init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
        states=self.VERSION_0_STATES_DICT, param_specs={}, param_changes=[])
    rights_manager.create_new_exploration_rights(exp_id, user_id)

    commit_message = 'New exploration created with title \'%s\'.' % title
    exp_model.commit(user_id, commit_message, [{
        'cmd': 'create_new',
        'title': 'title',
        'category': 'category',
    }])
    exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
    # The summary model is normally maintained by exp_services; since this
    # bypasses those services, it is created by hand here.
    exp_summary_model = exp_models.ExpSummaryModel(
        id=exp_id, title=title, category='category',
        objective='Old objective', language_code='en', tags=[],
        ratings=feconf.get_empty_ratings(),
        scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
        status=exp_rights.status,
        community_owned=exp_rights.community_owned,
        owner_ids=exp_rights.owner_ids, contributor_ids=[],
        contributors_summary={})
    exp_summary_model.put()

    # Create an ExplorationIssues model to match the behavior of creating
    # new explorations.
    stats_services.create_exp_issues_for_new_exploration(exp_id, 1)
def save_new_exp_with_custom_states_schema_version(
        self, exp_id, user_id, states_dict, version):
    """Saves a new default exploration with the given version of state dict.

    This function should only be used for creating explorations in tests
    involving migration of datastore explorations that use an old states
    schema version.

    Note that it makes an explicit commit to the datastore instead of using
    the usual functions for updating and creating explorations. This is
    because the latter approach would result in an exploration with the
    *current* states schema version.

    Args:
        exp_id: str. The exploration ID.
        user_id: str. The user_id of the creator.
        states_dict: dict. The dict representation of all the states.
        version: int. Custom states schema version.
    """
    # Direct model construction keeps states_dict at the caller-supplied
    # schema version instead of the current one.
    exp_model = exp_models.ExplorationModel(
        id=exp_id, category='category', title='title',
        objective='Old objective', language_code='en', tags=[], blurb='',
        author_notes='', states_schema_version=version,
        init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states=states_dict,
        param_specs={}, param_changes=[])
    rights_manager.create_new_exploration_rights(exp_id, user_id)
    commit_message = 'New exploration created with title \'title\'.'
    exp_model.commit(user_id, commit_message, [{
        'cmd': 'create_new',
        'title': 'title',
        'category': 'category',
    }])
    exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
    # Hand-built summary model, mirroring what exp_services would create.
    exp_summary_model = exp_models.ExpSummaryModel(
        id=exp_id, title='title', category='category',
        objective='Old objective', language_code='en', tags=[],
        ratings=feconf.get_empty_ratings(),
        scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
        status=exp_rights.status,
        community_owned=exp_rights.community_owned,
        owner_ids=exp_rights.owner_ids, contributor_ids=[],
        contributors_summary={})
    exp_summary_model.put()
def save_new_exp_with_states_schema_v21(self, exp_id, user_id, title):
    """Saves a new default exploration with a default version 21 states
    dictionary. Version 21 is where training data of exploration is stored
    with the states dict.

    This function should only be used for creating explorations in tests
    involving migration of datastore explorations that use an old states
    schema version.

    Note that it makes an explicit commit to the datastore instead of using
    the usual functions for updating and creating explorations. This is
    because the latter approach would result in an exploration with the
    *current* states schema version.

    Args:
        exp_id: str. The exploration ID.
        user_id: str. The user_id of the creator.
        title: str. The title of the exploration.
    """
    # Direct model construction keeps the states dict at schema version 21.
    exp_model = exp_models.ExplorationModel(
        id=exp_id, category='category', title=title,
        objective='Old objective', language_code='en', tags=[], blurb='',
        author_notes='', states_schema_version=21,
        init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
        states=self.VERSION_21_STATE_DICT, param_specs={}, param_changes=[])
    rights_manager.create_new_exploration_rights(exp_id, user_id)
    commit_message = 'New exploration created with title \'%s\'.' % title
    exp_model.commit(user_id, commit_message, [{
        'cmd': 'create_new',
        'title': 'title',
        'category': 'category',
    }])
    exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
    # Hand-built summary model, mirroring what exp_services would create.
    exp_summary_model = exp_models.ExpSummaryModel(
        id=exp_id, title=title, category='category',
        objective='Old objective', language_code='en', tags=[],
        ratings=feconf.get_empty_ratings(),
        scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
        status=exp_rights.status,
        community_owned=exp_rights.community_owned,
        owner_ids=exp_rights.owner_ids, contributor_ids=[],
        contributors_summary={})
    exp_summary_model.put()
def publish_exploration(self, owner_id, exploration_id):
    """Publishes the given exploration on behalf of its owner.

    Args:
        owner_id: str. The user_id of the owner of the exploration.
        exploration_id: str. The ID of the new exploration.
    """
    rights_manager.publish_exploration(
        user_services.UserActionsInfo(owner_id), exploration_id)
def save_new_default_collection(
        self, collection_id, owner_id, title='A title',
        category='A category', objective='An objective',
        language_code=constants.DEFAULT_LANGUAGE_CODE):
    """Creates and persists a default collection owned by owner_id.

    Args:
        collection_id: str. The id of the new default collection.
        owner_id: str. The user_id of the creator of the collection.
        title: str. The title of the collection.
        category: str. The category this collection belongs to.
        objective: str. The objective of this collection.
        language_code: str. The language_code of this collection.

    Returns:
        Collection. The collection domain object.
    """
    default_collection = (
        collection_domain.Collection.create_default_collection(
            collection_id, title=title, category=category,
            objective=objective, language_code=language_code))
    collection_services.save_new_collection(owner_id, default_collection)
    return default_collection
def save_new_valid_collection(
        self, collection_id, owner_id, title='A title',
        category='A category', objective='An objective',
        language_code=constants.DEFAULT_LANGUAGE_CODE,
        exploration_id='an_exploration_id',
        end_state_name=DEFAULT_END_STATE_NAME):
    """Creates an Oppia collection with one node referencing the given
    exploration, creating that exploration first if it does not exist.

    Args:
        collection_id: str. ID for the collection to be created.
        owner_id: str. The user_id of the creator of the collection.
        title: str. Title for the collection.
        category: str. The category of the exploration.
        objective: str. Objective for the exploration.
        language_code: str. The language code for the exploration.
        exploration_id: str. The exploration_id for the Oppia exploration.
        end_state_name: str. The name of the end state for the exploration.

    Returns:
        Collection. A newly-created collection containing the corresponding
        exploration details.
    """
    new_collection = collection_domain.Collection.create_default_collection(
        collection_id, title=title, category=category, objective=objective,
        language_code=language_code)
    # Reuse the exploration if it already exists; otherwise create a valid
    # one on the fly so that the collection node is never dangling.
    linked_exploration = exp_fetchers.get_exploration_by_id(
        exploration_id, strict=False)
    if linked_exploration is None:
        linked_exploration = self.save_new_valid_exploration(
            exploration_id, owner_id, title=title, category=category,
            objective=objective, end_state_name=end_state_name)
    new_collection.add_node(linked_exploration.id)
    collection_services.save_new_collection(owner_id, new_collection)
    return new_collection
def publish_collection(self, owner_id, collection_id):
    """Publishes the given collection on behalf of its owner.

    Args:
        owner_id: str. The user_id of the owner of the collection.
        collection_id: str. ID of the collection to be published.
    """
    rights_manager.publish_collection(
        user_services.UserActionsInfo(owner_id), collection_id)
def save_new_story(
        self, story_id, owner_id, corresponding_topic_id,
        title='Title', description='Description', notes='Notes',
        language_code=constants.DEFAULT_LANGUAGE_CODE,
        url_fragment='title', meta_tag_content='story meta tag content'):
    """Creates an Oppia Story and saves it.

    NOTE: Callers are responsible for ensuring that the
    'corresponding_topic_id' provided is valid, unless a test explicitly
    requires it to be invalid.

    Args:
        story_id: str. ID for the story to be created.
        owner_id: str. The user_id of the creator of the story.
        corresponding_topic_id: str. The id of the topic to which the story
            belongs.
        title: str. The title of the story.
        description: str. The high level description of the story.
        notes: str. A set of notes, that describe the characters,
            main storyline, and setting.
        language_code: str. The ISO 639-1 code for the language this story
            is written in.
        url_fragment: str. The url fragment of the story.
        meta_tag_content: str. The meta tag content of the story.

    Returns:
        Story. A newly-created story.
    """
    new_story = story_domain.Story.create_default_story(
        story_id, title, description, corresponding_topic_id, url_fragment)
    # Explicitly set every caller-provided attribute on the domain object
    # before saving, so the stored story reflects exactly these values.
    new_story.language_code = language_code
    new_story.notes = notes
    new_story.meta_tag_content = meta_tag_content
    new_story.title = title
    new_story.description = description
    new_story.url_fragment = url_fragment
    story_services.save_new_story(owner_id, new_story)
    return new_story
def save_new_story_with_story_contents_schema_v1(
        self, story_id, thumbnail_filename, thumbnail_bg_color,
        owner_id, title, description, notes, corresponding_topic_id,
        language_code=constants.DEFAULT_LANGUAGE_CODE,
        url_fragment='story-frag',
        meta_tag_content='story meta tag content'):
    """Saves a new story with a default version 1 story contents data dict.

    This function should only be used for creating stories in tests
    involving migration of datastore stories that use an old story contents
    schema version.

    Note that it makes an explicit commit to the datastore instead of using
    the usual functions for updating and creating stories. This is because
    the latter approach would result in a story with the *current* story
    contents schema version.

    Args:
        story_id: str. ID for the story to be created.
        thumbnail_filename: str|None. Thumbnail filename for the story.
        thumbnail_bg_color: str|None. Thumbnail background color for the
            story.
        owner_id: str. The user_id of the creator of the story.
        title: str. The title of the story.
        description: str. The high level description of the story.
        notes: str. A set of notes, that describe the characters, main
            storyline, and setting.
        corresponding_topic_id: str. The id of the topic to which the story
            belongs.
        language_code: str. The ISO 639-1 code for the language this story
            is written in.
        url_fragment: str. The URL fragment for the story.
        meta_tag_content: str. The meta tag content of the story.
    """
    # Direct model construction pins story_contents at schema version 1.
    story_model = story_models.StoryModel(
        id=story_id, thumbnail_filename=thumbnail_filename,
        thumbnail_bg_color=thumbnail_bg_color, description=description,
        title=title, language_code=language_code,
        story_contents_schema_version=1, notes=notes,
        corresponding_topic_id=corresponding_topic_id,
        story_contents=self.VERSION_1_STORY_CONTENTS_DICT,
        url_fragment=url_fragment, meta_tag_content=meta_tag_content)
    commit_message = 'New story created with title \'%s\'.' % title
    story_model.commit(
        owner_id, commit_message,
        [{'cmd': story_domain.CMD_CREATE_NEW, 'title': title}])
def save_new_subtopic(self, subtopic_id, owner_id, topic_id):
    """Creates a default subtopic page and saves it.

    Args:
        subtopic_id: str. ID for the subtopic to be created.
        owner_id: str. The user_id of the creator of the topic.
        topic_id: str. ID for the topic that the subtopic belongs to.

    Returns:
        SubtopicPage. A newly-created subtopic.
    """
    page = subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
        subtopic_id, topic_id)
    create_new_change = subtopic_page_domain.SubtopicPageChange({
        'cmd': subtopic_page_domain.CMD_CREATE_NEW,
        'topic_id': topic_id,
        'subtopic_id': subtopic_id,
    })
    subtopic_page_services.save_subtopic_page(
        owner_id, page, 'Create new subtopic', [create_new_change])
    return page
def save_new_topic(
        self, topic_id, owner_id, name='topic', abbreviated_name='topic',
        url_fragment='topic',
        thumbnail_filename='topic.svg',
        thumbnail_bg_color=(
            constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]),
        description='description', canonical_story_ids=None,
        additional_story_ids=None, uncategorized_skill_ids=None,
        subtopics=None, next_subtopic_id=0,
        language_code=constants.DEFAULT_LANGUAGE_CODE,
        meta_tag_content='topic meta tag content',
        practice_tab_is_displayed=False,
        page_title_fragment_for_web='topic page title'):
    """Creates an Oppia Topic and saves it.

    Args:
        topic_id: str. ID for the topic to be created.
        owner_id: str. The user_id of the creator of the topic.
        name: str. The name of the topic.
        abbreviated_name: str. The abbreviated name of the topic.
        url_fragment: str. The url fragment of the topic.
        thumbnail_filename: str|None. The thumbnail filename of the topic.
        thumbnail_bg_color: str|None. The thumbnail background color of the
            topic.
        description: str. The description of the topic.
        canonical_story_ids: list(str). The list of ids of canonical stories
            that are part of the topic.
        additional_story_ids: list(str). The list of ids of additional
            stories that are part of the topic.
        uncategorized_skill_ids: list(str). The list of ids of skills that
            are not part of any subtopic.
        subtopics: list(Subtopic). The different subtopics that are part of
            this topic.
        next_subtopic_id: int. The id for the next subtopic.
        language_code: str. The ISO 639-1 code for the language this topic
            is written in.
        meta_tag_content: str. The meta tag content for the topic.
        practice_tab_is_displayed: bool. Whether the practice tab should be
            displayed.
        page_title_fragment_for_web: str. The page title fragment for the
            topic.

    Returns:
        Topic. A newly-created topic.
    """
    # Wrap each story id in a default StoryReference; None means "none".
    make_reference = (
        topic_domain.StoryReference.create_default_story_reference)
    canonical_references = [
        make_reference(sid) for sid in (canonical_story_ids or [])]
    additional_references = [
        make_reference(sid) for sid in (additional_story_ids or [])]
    new_topic = topic_domain.Topic(
        topic_id, name, abbreviated_name, url_fragment, thumbnail_filename,
        thumbnail_bg_color, description, canonical_references,
        additional_references, uncategorized_skill_ids or [],
        subtopics or [], feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION,
        next_subtopic_id, language_code, 0,
        feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION, meta_tag_content,
        practice_tab_is_displayed, page_title_fragment_for_web)
    topic_services.save_new_topic(owner_id, new_topic)
    return new_topic
def save_new_topic_with_subtopic_schema_v1(
        self, topic_id, owner_id, name, abbreviated_name, url_fragment,
        canonical_name, description, thumbnail_filename, thumbnail_bg_color,
        canonical_story_references, additional_story_references,
        uncategorized_skill_ids, next_subtopic_id,
        language_code=constants.DEFAULT_LANGUAGE_CODE,
        meta_tag_content='topic meta tag content',
        practice_tab_is_displayed=False,
        page_title_fragment_for_web='topic page title'):
    """Saves a new topic with a default version 1 subtopic data dict.

    This function should only be used for creating topics in tests involving
    migration of datastore topics that use an old subtopic schema version.

    Note that it makes an explicit commit to the datastore instead of using
    the usual functions for updating and creating topics. This is because
    the latter approach would result in a topic with the *current* subtopic
    schema version.

    Args:
        topic_id: str. ID for the topic to be created.
        owner_id: str. The user_id of the creator of the topic.
        name: str. The name of the topic.
        abbreviated_name: str. The abbreviated name of the topic.
        url_fragment: str. The url fragment of the topic.
        canonical_name: str. The canonical name (lowercase) of the topic.
        description: str. The description of the topic.
        thumbnail_filename: str. The thumbnail file name of the topic.
        thumbnail_bg_color: str. The thumbnail background color of the
            topic.
        canonical_story_references: list(StoryReference). A set of story
            reference objects representing the canonical stories that are
            part of this topic.
        additional_story_references: list(StoryReference). A set of story
            reference object representing the additional stories that are
            part of this topic.
        uncategorized_skill_ids: list(str). The list of ids of skills that
            are not part of any subtopic.
        next_subtopic_id: int. The id for the next subtopic.
        language_code: str. The ISO 639-1 code for the language this topic
            is written in.
        meta_tag_content: str. The meta tag content for the topic.
        practice_tab_is_displayed: bool. Whether the practice tab should be
            displayed.
        page_title_fragment_for_web: str. The page title fragment for the
            topic.
    """
    # Rights model is committed first so the topic is published for tests.
    topic_rights_model = topic_models.TopicRightsModel(
        id=topic_id, manager_ids=[], topic_is_published=True)
    # Direct model construction pins the subtopics at schema version 1.
    topic_model = topic_models.TopicModel(
        id=topic_id, name=name, abbreviated_name=abbreviated_name,
        url_fragment=url_fragment, thumbnail_filename=thumbnail_filename,
        thumbnail_bg_color=thumbnail_bg_color,
        canonical_name=canonical_name, description=description,
        language_code=language_code,
        canonical_story_references=canonical_story_references,
        additional_story_references=additional_story_references,
        uncategorized_skill_ids=uncategorized_skill_ids,
        subtopic_schema_version=1,
        story_reference_schema_version=(
            feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
        next_subtopic_id=next_subtopic_id,
        subtopics=[self.VERSION_1_SUBTOPIC_DICT],
        meta_tag_content=meta_tag_content,
        practice_tab_is_displayed=practice_tab_is_displayed,
        page_title_fragment_for_web=page_title_fragment_for_web)
    commit_message = 'New topic created with name \'%s\'.' % name
    topic_rights_model.commit(
        committer_id=owner_id,
        commit_message='Created new topic rights',
        commit_cmds=[{'cmd': topic_domain.CMD_CREATE_NEW}])
    topic_model.commit(
        owner_id, commit_message,
        [{'cmd': topic_domain.CMD_CREATE_NEW, 'name': name}])
def save_new_question(
        self, question_id, owner_id, question_state_data,
        linked_skill_ids, inapplicable_skill_misconception_ids=None,
        language_code=constants.DEFAULT_LANGUAGE_CODE):
    """Creates an Oppia Question and saves it.

    Args:
        question_id: str. ID for the question to be created.
        owner_id: str. The id of the user creating the question.
        question_state_data: State. The state data for the question.
        linked_skill_ids: list(str). List of skill IDs linked to the
            question.
        inapplicable_skill_misconception_ids: list(str). List of skill
            misconceptions ids that are not applicable to the question.
        language_code: str. The ISO 639-1 code for the language this
            question is written in.

    Returns:
        Question. A newly-created question.
    """
    # A None default is used for the misconception ids because default
    # arguments must not be mutable lists; normalize it to [] here.
    misconception_ids = inapplicable_skill_misconception_ids or []
    new_question = question_domain.Question(
        question_id, question_state_data,
        feconf.CURRENT_STATE_SCHEMA_VERSION, language_code, 0,
        linked_skill_ids, misconception_ids)
    question_services.add_question(owner_id, new_question)
    return new_question
def save_new_question_with_state_data_schema_v27(
        self, question_id, owner_id, linked_skill_ids,
        inapplicable_skill_misconception_ids=None,
        language_code=constants.DEFAULT_LANGUAGE_CODE):
    """Saves a new default question with a default version 27 state data
    dict.

    This function should only be used for creating questions in tests
    involving migration of datastore questions that use an old state data
    schema version.

    Note that it makes an explicit commit to the datastore instead of using
    the usual functions for updating and creating questions. This is because
    the latter approach would result in an question with the *current* state
    data schema version.

    Args:
        question_id: str. ID for the question to be created.
        owner_id: str. The id of the user creating the question.
        linked_skill_ids: list(str). The skill IDs linked to the question.
        inapplicable_skill_misconception_ids: list(str). List of skill
            misconceptions ids that are not applicable to the question.
        language_code: str. The ISO 639-1 code for the language this
            question is written in.
    """
    # This needs to be done because default arguments can not be of list
    # type.
    # Direct model construction pins the state data at schema version 27.
    question_model = question_models.QuestionModel(
        id=question_id, question_state_data=self.VERSION_27_STATE_DICT,
        language_code=language_code, version=1,
        question_state_data_schema_version=27,
        linked_skill_ids=linked_skill_ids,
        inapplicable_skill_misconception_ids=(
            inapplicable_skill_misconception_ids or []))
    question_model.commit(
        owner_id, 'New question created',
        [{'cmd': question_domain.CMD_CREATE_NEW}])
def save_new_question_suggestion_with_state_data_schema_v27(
        self, author_id, skill_id, suggestion_id=None,
        language_code=constants.DEFAULT_LANGUAGE_CODE):
    """Saves a new question suggestion with a default version 27 state data
    dict.

    This function should only be used for creating question suggestion in
    tests involving migration of datastore question suggestions that use an
    old state data schema version.

    Note that it makes an explicit commit to the datastore instead of using
    the usual functions for updating and creating questions. This is because
    the latter approach would result in an question with the *current* state
    data schema version.

    Args:
        author_id: str. The id of the user authoring the suggestion.
        skill_id: str. The skill the suggested question is linked to.
        suggestion_id: str|None. The id for the suggestion; if None, a new
            thread id is generated for it.
        language_code: str. The ISO 639-1 code for the language this
            question is written in.

    Returns:
        str. The id of the saved suggestion.
    """
    score_category = (
        suggestion_models.SCORE_TYPE_QUESTION +
        suggestion_models.SCORE_CATEGORY_DELIMITER + skill_id)
    # The change payload embeds the v27 state dict directly so that the
    # stored suggestion keeps the old schema version.
    change = {
        'cmd': (
            question_domain
            .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
        'question_dict': {
            'question_state_data': self.VERSION_27_STATE_DICT,
            'question_state_data_schema_version': 27,
            'language_code': language_code,
            'linked_skill_ids': [skill_id],
            'inapplicable_skill_misconception_ids': []
        },
        'skill_id': skill_id,
        'skill_difficulty': 0.3
    }
    if suggestion_id is None:
        suggestion_id = (
            feedback_models.GeneralFeedbackThreadModel.
            generate_new_thread_id(
                feconf.ENTITY_TYPE_SKILL, skill_id))
    suggestion_models.GeneralSuggestionModel.create(
        feconf.SUGGESTION_TYPE_ADD_QUESTION,
        feconf.ENTITY_TYPE_SKILL, skill_id, 1,
        suggestion_models.STATUS_IN_REVIEW, author_id, None, change,
        score_category, suggestion_id, language_code)

    return suggestion_id
def save_new_skill(
        self, skill_id, owner_id, description='description',
        misconceptions=None, rubrics=None, skill_contents=None,
        language_code=constants.DEFAULT_LANGUAGE_CODE,
        prerequisite_skill_ids=None):
    """Creates an Oppia Skill and saves it.

    Args:
        skill_id: str. ID for the skill to be created.
        owner_id: str. The user_id of the creator of the skill.
        description: str. The description of the skill.
        misconceptions: list(Misconception)|None. A list of Misconception
            objects that contains the various misconceptions of the skill.
        rubrics: list(Rubric)|None. A list of Rubric objects that contain
            the rubric for each difficulty of the skill.
        skill_contents: SkillContents|None. A SkillContents object
            containing the explanation and examples of the skill.
        language_code: str. The ISO 639-1 code for the language this skill
            is written in.
        prerequisite_skill_ids: list(str)|None. The prerequisite skill IDs
            for the skill.

    Returns:
        Skill. A newly-created skill.
    """
    new_skill = skill_domain.Skill.create_default_skill(
        skill_id, description, [])
    # Only override the defaults the caller actually supplied.
    if misconceptions is not None:
        new_skill.misconceptions = misconceptions
        new_skill.next_misconception_id = len(misconceptions) + 1
    if skill_contents is not None:
        new_skill.skill_contents = skill_contents
    if prerequisite_skill_ids is not None:
        new_skill.prerequisite_skill_ids = prerequisite_skill_ids
    if rubrics is not None:
        new_skill.rubrics = rubrics
    else:
        # Default rubrics: one placeholder explanation per difficulty.
        new_skill.rubrics = [
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[index],
                ['Explanation %d' % (index + 1)])
            for index in range(3)
        ]
    new_skill.language_code = language_code
    new_skill.version = 0
    skill_services.save_new_skill(owner_id, new_skill)
    return new_skill
def save_new_skill_with_defined_schema_versions(
        self, skill_id, owner_id, description, next_misconception_id,
        misconceptions=None, rubrics=None, skill_contents=None,
        misconceptions_schema_version=1, rubric_schema_version=1,
        skill_contents_schema_version=1,
        language_code=constants.DEFAULT_LANGUAGE_CODE):
    """Saves a new default skill with the given versions for misconceptions
    and skill contents.

    This function should only be used for creating skills in tests involving
    migration of datastore skills that use an old schema version.

    Note that it makes an explicit commit to the datastore instead of using
    the usual functions for updating and creating skills. This is because
    the latter approach would result in a skill with the *current* schema
    version.

    Args:
        skill_id: str. ID for the skill to be created.
        owner_id: str. The user_id of the creator of the skill.
        description: str. The description of the skill.
        next_misconception_id: int. The misconception id to be used by the
            next misconception added.
        misconceptions: list(Misconception.to_dict()). The list of
            misconception dicts associated with the skill.
        rubrics: list(Rubric.to_dict()). The list of rubric dicts associated
            with the skill.
        skill_contents: SkillContents.to_dict(). A SkillContents dict
            containing the explanation and examples of the skill.
        misconceptions_schema_version: int. The schema version for the
            misconceptions object.
        rubric_schema_version: int. The schema version for the rubric
            object.
        skill_contents_schema_version: int. The schema version for the
            skill_contents object.
        language_code: str. The ISO 639-1 code for the language this skill
            is written in.
    """
    # Direct model construction keeps the caller-supplied schema versions
    # for misconceptions, rubrics, and skill contents.
    skill_model = skill_models.SkillModel(
        id=skill_id, description=description, language_code=language_code,
        misconceptions=misconceptions, rubrics=rubrics,
        skill_contents=skill_contents,
        next_misconception_id=next_misconception_id,
        misconceptions_schema_version=misconceptions_schema_version,
        rubric_schema_version=rubric_schema_version,
        skill_contents_schema_version=skill_contents_schema_version,
        superseding_skill_id=None, all_questions_merged=False)
    skill_model.commit(
        owner_id, 'New skill created.',
        [{'cmd': skill_domain.CMD_CREATE_NEW}])
def _create_valid_question_data(self, default_dest_state_name):
    """Builds a valid TextInput question state for use in question tests.

    Args:
        default_dest_state_name: str. The default destination state.

    Returns:
        State. The default question state, with a solution, one hint, and
        a default outcome labelled as correct.
    """
    question_state = state_domain.State.create_default_state(
        default_dest_state_name, is_initial_state=True)
    question_state.update_interaction_id('TextInput')
    # Attach a solution and a single hint (call order matches what the
    # state domain expects: solution first, then hints).
    sample_solution = state_domain.Solution.from_dict(
        question_state.interaction.id, {
            'answer_is_exclusive': False,
            'correct_answer': 'Solution',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>This is a solution.</p>',
            },
        })
    question_state.update_interaction_solution(sample_solution)
    question_state.update_interaction_hints([
        state_domain.Hint(
            state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')),
    ])
    question_state.update_interaction_customization_args({
        'placeholder': {
            'value': {
                'content_id': 'ca_placeholder',
                'unicode_str': 'Enter text here',
            },
        },
        'rows': {'value': 1},
    })
    question_state.update_next_content_id_index(2)
    # Questions treat the default outcome as the correct answer path.
    question_state.interaction.default_outcome.labelled_as_correct = True
    question_state.interaction.default_outcome.dest = None
    return question_state
class LinterTestBase(GenericTestBase):
    """Base class for linter tests."""

    def setUp(self):
        super(LinterTestBase, self).setUp()
        self.linter_stdout = []

        def mock_print(*args):
            """Mock for python_utils.PRINT that captures one line of output
            (the space-joined unicode form of its arguments) into
            linter_stdout.

            Args:
                *args: list(*). Variable length argument list of values to
                    print in the same line of output.
            """
            joined_line = ' '.join(
                python_utils.UNICODE(arg) for arg in args)
            self.linter_stdout.append(joined_line)

        self.print_swap = self.swap(python_utils, 'PRINT', mock_print)

    def assert_same_list_elements(self, phrases, stdout):
        """Checks that at least one captured output line contains every one
        of the given phrases.

        Args:
            phrases: list(str). A list of phrases we are trying to find in
                one of the stdout outputs. For example, python linting
                outputs a success string that includes data we don't have
                easy access to, like how long the test took, so we may want
                to search for a substring of that success string in stdout.
            stdout: list(str). A list of the output results from the
                method's execution.
        """
        self.assertTrue(any(
            all(phrase in line for phrase in phrases) for line in stdout))

    def assert_failed_messages_count(self, stdout, expected_failed_count):
        """Asserts that the number of lines starting with 'FAILED' matches
        the expected number of failed checks.

        Args:
            stdout: list(str). A list of linter output messages.
            expected_failed_count: int. Expected number of failed messages.
        """
        failed_messages = [
            message for message in stdout if message.startswith('FAILED')]
        self.assertEqual(len(failed_messages), expected_failed_count)
class AuditJobsTestBase(GenericTestBase):
    """Base class for audit jobs tests."""

    def run_job_and_check_output(
            self, expected_output, sort=False, literal_eval=False):
        """Runs the audit job under test and compares its output against
        the expectation.

        Args:
            expected_output: list(*). The expected result of the job.
            sort: bool. Whether to sort the outputs before comparison.
            literal_eval: bool. Whether to use ast.literal_eval before
                comparison.
        """
        self.process_and_flush_pending_tasks()
        job_id = self.job_class.create_new()
        # The one-off queue must be empty before, and hold exactly one job
        # after, enqueueing.
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
        self.job_class.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        self.process_and_flush_pending_tasks()
        actual_output = self.job_class.get_output(job_id)
        if literal_eval:
            def parse_output(output):
                """Evaluates each output string into a (key, value) pair and
                returns a dict, with list values sorted for stable
                comparison.
                """
                parsed = {}
                for item in (ast.literal_eval(text) for text in output):
                    value = item[1]
                    parsed[item[0]] = (
                        sorted(value) if isinstance(value, list) else value)
                return parsed

            actual_output_dict = parse_output(actual_output)
            expected_output_dict = parse_output(expected_output)
            self.assertItemsEqual(actual_output_dict, expected_output_dict)
            for key in actual_output_dict:
                self.assertEqual(
                    actual_output_dict[key], expected_output_dict[key])
        elif sort:
            self.assertEqual(sorted(actual_output), sorted(expected_output))
        else:
            self.assertEqual(actual_output, expected_output)
class EmailMessageMock(python_utils.OBJECT):
    """Mock for core.platform.models email services messages."""

    def __init__(
            self, sender_email, recipient_email, subject, plaintext_body,
            html_body, bcc=None, reply_to=None, recipient_variables=None):
        """Records the supplied email fields on the mock message.

        Args:
            sender_email: str. The email address of the sender. This should
                be in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
                'SENDER_EMAIL_ADDRESS'. Must be utf-8.
            recipient_email: str. The email address of the recipient. Must
                be utf-8.
            subject: str. The subject line of the email, Must be utf-8.
            plaintext_body: str. The plaintext body of the email. Must be
                utf-8.
            html_body: str. The HTML body of the email. Must fit in a
                datastore entity. Must be utf-8.
            bcc: list(str)|None. Optional argument. List of bcc emails.
                Emails must be utf-8.
            reply_to: str|None. Optional argument. Reply address formatted
                like “reply+<reply_id>@<incoming_email_domain_name>
                reply_id is the unique id of the sender.
            recipient_variables: dict|None. Optional argument. If batch
                sending requires differentiating each email based on the
                recipient, we assign a unique id to each recipient,
                including info relevant to that recipient so that we can
                reference it when composing the email like so:
                    recipient_variables = {
                        'bob@example.com': {'first': 'Bob', 'id': 1},
                        'alice@example.com': {'first': 'Alice', 'id': 2},
                    }
                    subject = 'Hey, %recipient.first%'
                For more information about this format, see:
                https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
        """
        # Addressing fields.
        self.sender = sender_email
        self.to = recipient_email
        self.bcc = bcc
        self.reply_to = reply_to
        # Content fields.
        self.subject = subject
        self.body = plaintext_body
        self.html = html_body
        # Batch-sending metadata.
        self.recipient_variables = recipient_variables
class GenericEmailTestBase(GenericTestBase):
"""Base class for tests requiring email services."""
emails_dict = collections.defaultdict(list)
def run(self, result=None):
    """Adds a context swap on top of the test_utils.run() method so that
    test classes extending GenericEmailTestBase will automatically have a
    mailgun api key, mailgun domain name and mocked version of
    send_email_to_recipients().

    Args:
        result: TestResult|None. The test result object passed through to
            the superclass run() method.
    """
    with self.swap(
        email_services, 'send_email_to_recipients',
        self._send_email_to_recipients):
        # Use this class's actual name in the super() call for consistency
        # with setUp(); the previous super(EmailTestBase, self) call relied
        # on a module-level alias defined elsewhere in this file.
        super(GenericEmailTestBase, self).run(result=result)
def setUp(self):
    """Resets the captured-emails dict before each test."""
    super(GenericEmailTestBase, self).setUp()
    self._wipe_emails_dict()
def _wipe_emails_dict(self):
    """Reset email dictionary for a new test."""
    # Maps recipient email address -> list of EmailMessageMock objects.
    self.emails_dict = collections.defaultdict(list)
def _send_email_to_recipients(
self, sender_email, recipient_emails, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Mocks sending an email to each email in recipient_emails.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_emails: list(str). The email addresses of the recipients.
Must be utf-8.
subject: str. The subject line of the email, Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Must be
utf-8.
reply_to: str|None. Optional Argument. Reply address formatted like
“reply+<reply_id>@<incoming_email_domain_name> reply_id is the
unique id of the sender.
recipient_variables: dict|None. Optional Argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'bob@example.com': {'first': 'Bob', 'id': 1},
'alice@example.com': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
Returns:
bool. Whether the emails are sent successfully.
"""
bcc_emails = None
if bcc:
bcc_emails = bcc[0] if len(bcc) == 1 else bcc
new_email = EmailMessageMock(
sender_email, recipient_emails, subject, plaintext_body, html_body,
bcc=bcc_emails, reply_to=(reply_to if reply_to else None),
recipient_variables=(
recipient_variables if recipient_variables else None))
for recipient_email in recipient_emails:
self.emails_dict[recipient_email].append(new_email)
return True
def _get_sent_email_messages(self, to):
"""Gets messages to a single recipient email.
Args:
to: str. The recipient email address.
Returns:
list(EmailMessageMock). The list of email messages corresponding to
that recipient email.
"""
return self.emails_dict[to] if to in self.emails_dict else []
def _get_all_sent_email_messages(self):
"""Gets the entire messages dictionary.
Returns:
dict(str, list(EmailMessageMock)). The dict keyed by recipient
email. Each value contains a list of EmailMessageMock objects
corresponding to that recipient email; in other words, all
individual emails sent to that specific recipient email.
"""
return self.emails_dict
# Shorter alias for GenericEmailTestBase.
EmailTestBase = GenericEmailTestBase
class ClassifierTestBase(GenericEmailTestBase):
    """Base class for classifier test classes that share helpers for reading
    classifier data and for mocking the flow that stores trained models
    through a post request.

    Derived from GenericEmailTestBase because the
    TrainedClassifierHandlerTests suite needs the email-service test helpers
    in addition to the classifier helpers defined below.
    """

    def post_blob(self, url, payload, expected_status_int=200):
        """Posts a BLOB object to the server and returns the parsed response.

        Note that this method should only be used for the
        classifier.TrainedClassifierHandler handler and for no one else. There
        is no general security mechanism for transferring binary data;
        TrainedClassifierHandler implements its own restricted mechanism.

        Args:
            url: str. The URL to which the BLOB payload should be sent
                through a post request.
            payload: bytes. Binary data which needs to be sent.
            expected_status_int: int. The status expected as a response of the
                post request.

        Returns:
            dict. Parsed JSON response received upon invoking the post
            request.
        """
        expect_errors = expected_status_int >= 400
        response = self._send_post_request(
            self.testapp, url, payload,
            expect_errors, expected_status_int=expected_status_int,
            headers={b'content-type': b'application/octet-stream'})
        # Testapp only verifies the expected status when expect_errors is
        # False, so the status is checked explicitly here for all cases.
        # Reference URL:
        # https://github.com/Pylons/webtest/blob/
        # bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119 .
        self.assertEqual(response.status_int, expected_status_int)
        return self._parse_json_response(response, expect_errors)

    def _get_classifier_data_from_classifier_training_job(
            self, classifier_training_job):
        """Retrieves the trained classifier model from GCS using metadata
        stored in the given training job.

        Args:
            classifier_training_job: ClassifierTrainingJob. Domain object
                containing metadata of the training job which is used to
                retrieve the trained model.

        Returns:
            FrozenModel. Protobuf object containing classifier data.
        """
        file_system = fs_domain.AbstractFileSystem(
            fs_services.get_entity_file_system_class()(
                feconf.ENTITY_TYPE_EXPLORATION,
                classifier_training_job.exp_id))
        raw_model_data = utils.decompress_from_zlib(
            file_system.get(classifier_training_job.classifier_data_filename))
        frozen_model = text_classifier_pb2.TextClassifierFrozenModel()
        frozen_model.ParseFromString(raw_model_data)
        return frozen_model
class FunctionWrapper(python_utils.OBJECT):
    """A utility for making function wrappers.

    Create a subclass and override either or both of the pre_call_hook and
    post_call_hook methods; they run immediately before and after each call
    of the wrapped function. See those methods for more info.
    """

    def __init__(self, func):
        """Creates a new FunctionWrapper instance.

        Args:
            func: a callable, or data descriptor. If it's a descriptor, then
                __get__ should return a bound method. For example, func can be
                a function, a method, a static or class method, but not a
                @property.
        """
        self._func = func
        self._instance = None

    def __call__(self, *args, **kwargs):
        """Calls the wrapped function, surrounding it with the hooks:
        pre_call_hook runs before the call and post_call_hook runs after it.
        """
        bound_args = list(args)
        if self._instance is not None:
            # Re-insert the instance captured by __get__ so the wrapped
            # method is called with the object it was accessed through.
            bound_args.insert(0, self._instance)

        args_dict = inspect.getcallargs(self._func, *bound_args, **kwargs)

        self.pre_call_hook(args_dict)
        result = self._func(*bound_args, **kwargs)
        self.post_call_hook(args_dict, result)

        return result

    def __get__(self, instance, owner):
        # Implementing __get__ gives us a chance to capture the instance that
        # self._func was bound to; without it, method calls would lose their
        # receiver. See: https://stackoverflow.com/a/22555978/675311
        # NOTE: Storing the instance on self makes the wrapper stateful: it
        # remembers only the most recently bound instance.
        self._instance = instance
        return self

    def pre_call_hook(self, args):
        """Override this to do tasks that should be executed before the actual
        function call.

        Args:
            args: list(*). Set of arguments that the function accepts.
        """
        pass

    def post_call_hook(self, args, result):
        """Override this to do tasks that should be executed after the actual
        function call.

        Args:
            args: list(*). Set of arguments that the function accepts.
            result: *. Result returned from the function.
        """
        pass
class CallCounter(FunctionWrapper):
    """A function wrapper that keeps track of how often the wrapped function
    is invoked.

    The counter is incremented before each call, so invocations that raise an
    exception are counted as well.
    """

    def __init__(self, f):
        """Counts the number of times the given function has been called. See
        FunctionWrapper for arguments.
        """
        super(CallCounter, self).__init__(f)
        self._times_called = 0

    @property
    def times_called(self):
        """Property that returns the number of times the wrapped function has
        been called.

        Returns:
            int. The number of times the wrapped function has been called.
        """
        return self._times_called

    def pre_call_hook(self, args):
        """Increments the call counter before the wrapped function runs. This
        also happens when the function ends up raising an exception.

        Args:
            args: list(*). Set of arguments that the function accepts.
        """
        self._times_called = self._times_called + 1
class FailingFunction(FunctionWrapper):
    """A function wrapper that makes a function fail, raising a given
    exception. It can be set to succeed after a given number of calls.
    """

    # Sentinel for "never succeed". NOTE: this is a string, so it must never
    # be compared against integers with <, >=, etc. (that raises TypeError on
    # Python 3); always test equality against this sentinel first.
    INFINITY = 'infinity'

    def __init__(self, f, exception, num_tries_before_success):
        """Create a new Failing function.

        Args:
            f: func. See FunctionWrapper.
            exception: Exception. The exception to be raised.
            num_tries_before_success: int. The number of times to raise an
                exception, before a call succeeds. If this is 0, all calls
                will succeed; if it is FailingFunction.INFINITY, all calls
                will fail.

        Raises:
            ValueError. The number of tries before success is neither an
                integer greater than or equal to 0 nor
                FailingFunction.INFINITY.
        """
        super(FailingFunction, self).__init__(f)
        self._exception = exception
        self._num_tries_before_success = num_tries_before_success
        self._always_fail = (
            self._num_tries_before_success == FailingFunction.INFINITY)
        self._times_called = 0

        # Check _always_fail first: when it is True,
        # _num_tries_before_success is the INFINITY string, and comparing a
        # string with an integer raises a TypeError on Python 3.
        if not (self._always_fail or self._num_tries_before_success >= 0):
            raise ValueError(
                'num_tries_before_success should either be an '
                'integer greater than or equal to 0, '
                'or FailingFunction.INFINITY')

    def pre_call_hook(self, args):
        """Method that is called each time before the actual function call to
        check if the exception is to be raised based on the number of tries
        before success.

        Args:
            args: list(*). Set of arguments this function accepts.
        """
        self._times_called += 1
        # Short-circuit on _always_fail so that the INFINITY string is never
        # compared against an integer (a TypeError on Python 3).
        call_should_fail = self._always_fail or (
            self._num_tries_before_success >= self._times_called)
        if call_should_fail:
            raise self._exception
| 41.116802
| 125
| 0.628023
|
from __future__ import absolute_import
from __future__ import unicode_literals
import ast
import collections
import contextlib
import copy
import inspect
import itertools
import json
import logging
import os
import re
import unittest
from constants import constants
from core.controllers import base
from core.domain import auth_domain
from core.domain import caching_domain
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import interaction_registry
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.search import elastic_search_services
from core.platform.taskqueue import cloud_tasks_emulator
import feconf
import main
import main_mail
import main_taskqueue
from proto import text_classifier_pb2
import python_utils
import schema_utils
import utils
import contextlib2
import elasticsearch
from google.appengine.api import mail
from google.appengine.ext import deferred
from google.appengine.ext import testbed
import requests_mock
import webtest
# Storage model modules used by the test utilities, imported through the
# platform registry so that the configured storage layer is used.
(
    auth_models, exp_models, feedback_models, question_models, skill_models,
    story_models, suggestion_models, topic_models,) = (
        models.Registry.import_models([
            models.NAMES.auth, models.NAMES.exploration, models.NAMES.feedback,
            models.NAMES.question, models.NAMES.skill, models.NAMES.story,
            models.NAMES.suggestion, models.NAMES.topic]))

# Platform-specific service implementations, resolved through the registry.
current_user_services = models.Registry.import_current_user_services()
datastore_services = models.Registry.import_datastore_services()
email_services = models.Registry.import_email_services()
memory_cache_services = models.Registry.import_cache_services()
platform_auth_services = models.Registry.import_auth_services()
platform_taskqueue_services = models.Registry.import_taskqueue_services()

# Byte prefix prepended to test log output (see TestBase.log_line) so that
# calling scripts can identify these lines.
LOG_LINE_PREFIX = b'LOG_INFO_TEST: '

# List of model classes that don't have Wipeout or Takeout, related class
# base classes for the other models.
BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES = (
    'BaseCommitLogEntryModel',
    'BaseHumanMaintainedModel',
    'BaseMapReduceBatchResultsModel',
    'BaseModel',
    'BaseSnapshotContentModel',
    'BaseSnapshotMetadataModel',
    'VersionedModel',
)
def get_filepath_from_filename(filename, rootdir):
    """Returns the path of the file with the given name under rootdir.

    Args:
        filename: str. The name of the file to look for.
        rootdir: str. The directory tree to search in.

    Returns:
        str|None. The path of the single matching file, or None if no file
        with that name exists under rootdir.

    Raises:
        Exception. More than one file with the given name was found.
    """
    # This is required since error files are served according to error status
    # code. The file served is error-page.mainpage.html but it is compiled and
    # stored as error-page-{status_code}.mainpage.html. So, we need to swap the
    # name here to obtain the correct filepath.
    if filename.startswith('error-page'):
        filename = 'error-page.mainpage.html'
    matches = [
        os.path.join(subdir, candidate)
        for subdir, _, filenames in os.walk(rootdir)
        for candidate in filenames
        if candidate == filename
    ]
    if len(matches) > 1:
        raise Exception('Multiple files found with name: %s' % filename)
    return matches[0] if matches else None
def mock_load_template(filename):
    """Mock for load_template that reads the template file directly from the
    core/templates/pages source tree.

    Args:
        filename: str. The name of the template file to read.

    Returns:
        str. The contents of the template file.
    """
    template_path = get_filepath_from_filename(
        filename, os.path.join('core', 'templates', 'pages'))
    with python_utils.open_file(template_path, 'r') as template_file:
        return template_file.read()
def check_image_png_or_webp(image_string):
    """Checks if the given image data URL represents a PNG or WEBP image.

    Args:
        image_string: str. The image data URL to check.

    Returns:
        bool. Whether the string starts with a PNG or WEBP data-URL prefix.
    """
    return (
        image_string.startswith('data:image/png') or
        image_string.startswith('data:image/webp'))
def get_storage_model_module_names():
    """Yields the names of the storage model modules.

    Yields:
        str. The name of each storage model module.
    """
    # As models.NAMES is an enum, it cannot be iterated over directly, so we
    # iterate over its __dict__ and skip the dunder attributes.
    for attribute_name in models.NAMES.__dict__:
        if '__' in attribute_name:
            continue
        yield attribute_name
def get_storage_model_classes():
    """Yields every class defined in the storage model modules whose ancestry
    includes a class named 'Model'.

    Yields:
        class. Each storage model class.
    """
    for module_name in get_storage_model_module_names():
        (module,) = models.Registry.import_models([module_name])
        for _, member in inspect.getmembers(module, inspect.isclass):
            ancestor_names = [
                ancestor.__name__ for ancestor in inspect.getmro(member)]
            if 'Model' in ancestor_names:
                yield member
class ElasticSearchStub(python_utils.OBJECT):
    """In-memory stub for the elasticsearch client API used in tests.

    Documents for each index are stored in the _DB dict, keyed by index name.
    """

    # NOTE: This is a class-level (shared) mutable attribute; documents
    # persist across instances until reset() is called.
    _DB = {}

    def reset(self):
        """Removes all indexes and documents from the stub."""
        self._DB.clear()

    def _generate_index_not_found_error(self, index_name):
        """Raises an elasticsearch.NotFoundError mimicking the payload the
        real service returns for a missing index.

        Args:
            index_name: str. The name of the missing index.

        Raises:
            elasticsearch.NotFoundError. Always raised. Callers written as
                'raise self._generate_index_not_found_error(...)' simply let
                the exception raised here propagate.
        """
        raise elasticsearch.NotFoundError(
            404, 'index_not_found_exception', {
                'status': 404,
                'error': {
                    'reason': 'no such index [%s]' % index_name,
                    'root_cause': [{
                        'reason': 'no such index [%s]' % index_name,
                        'index': index_name,
                        'index_uuid': '_na_',
                        'type': 'index_not_found_exception',
                        'resource.type': 'index_or_alias',
                        'resource.id': index_name
                    }],
                    'index': index_name,
                    'index_uuid': '_na_',
                    'type': 'index_not_found_exception',
                    'resource.type': 'index_or_alias',
                    'resource.id': index_name
                }
            }
        )

    def mock_create_index(self, index_name):
        """Creates a new, empty index.

        Args:
            index_name: str. The name of the index to create.

        Returns:
            dict. A response payload mimicking the real service.

        Raises:
            elasticsearch.RequestError. The index already exists.
        """
        if index_name in self._DB:
            raise elasticsearch.RequestError(
                400, 'resource_already_exists_exception',
                'index [%s/RaNdOmStRiNgOfAlPhAs] already exists' % index_name)
        self._DB[index_name] = []
        return {
            'index': index_name,
            'acknowledged': True,
            'shards_acknowledged': True
        }

    def mock_index(self, index_name, document, id=None):  # pylint: disable=redefined-builtin
        """Adds a document to an index, replacing any existing document with
        the same 'id'.

        Args:
            index_name: str. The name of the index to add the document to.
            document: dict. The document to store; its 'id' key is used for
                de-duplication.
            id: str|None. The id of the document.

        Returns:
            dict. A response payload mimicking the real service.

        Raises:
            elasticsearch.NotFoundError. The index does not exist.
        """
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        self._DB[index_name] = [
            d for d in self._DB[index_name] if d['id'] != id]
        self._DB[index_name].append(document)
        return {
            '_index': index_name,
            '_shards': {
                'total': 2,
                'successful': 1,
                'failed': 0,
            },
            '_seq_no': 96,
            '_primary_term': 1,
            'result': 'created',
            '_id': id,
            '_version': 1,
            '_type': '_doc',
        }

    def mock_exists(self, index_name, doc_id):
        """Returns whether a document with the given id exists in the index.

        Args:
            index_name: str. The name of the index to check.
            doc_id: str. The id of the document to look for.

        Returns:
            bool. Whether the document exists in the index.

        Raises:
            elasticsearch.NotFoundError. The index does not exist.
        """
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        return any([d['id'] == doc_id for d in self._DB[index_name]])

    def mock_delete(self, index_name, doc_id):
        """Deletes the document with the given id from the index.

        Args:
            index_name: str. The name of the index to delete the document
                from.
            doc_id: str. The id of the document to delete.

        Returns:
            dict. A response payload mimicking the real service.

        Raises:
            elasticsearch.NotFoundError. The index does not exist, or the
                document is not found in the index.
        """
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        docs = [d for d in self._DB[index_name] if d['id'] != doc_id]
        if len(self._DB[index_name]) != len(docs):
            self._DB[index_name] = docs
            return {
                '_type': '_doc',
                '_seq_no': 99,
                '_shards': {
                    'total': 2,
                    'successful': 1,
                    'failed': 0
                },
                'result': 'deleted',
                '_primary_term': 1,
                '_index': index_name,
                '_version': 4,
                '_id': '0'
            }
        # No document was removed, so report the document as not found.
        raise elasticsearch.NotFoundError(
            404, {
                '_index': index_name,
                '_type': '_doc',
                '_id': doc_id,
                '_version': 1,
                'result': 'not_found',
                '_shards': {
                    'total': 2,
                    'successful': 1,
                    'failed': 0
                },
                '_seq_no': 103,
                '_primary_term': 1
            })

    def mock_delete_by_query(self, index_name, query):
        """Deletes documents from an index matching the given query.

        Note that this mock only supports the match_all query.

        Args:
            index_name: str. The name of the index to delete documents from.
            query: dict. Must be exactly {'query': {'match_all': {}}}.

        Returns:
            dict. A response payload mimicking the real service.

        Raises:
            elasticsearch.NotFoundError. The index does not exist.
        """
        # NOTE(review): query.keys() == ['query'] relies on Python 2
        # semantics; on Python 3, dict.keys() returns a view and this
        # comparison is always False.
        assert query.keys() == ['query']
        assert query['query'] == {
            'match_all': {}
        }
        if index_name not in self._DB:
            raise self._generate_index_not_found_error(index_name)
        index_size = len(self._DB[index_name])
        del self._DB[index_name][:]
        return {
            'took': 72,
            'version_conflicts': 0,
            'noops': 0,
            'throttled_until_millis': 0,
            'failures': [],
            'throttled_millis': 0,
            'total': index_size,
            'batches': 1,
            'requests_per_second': -1.0,
            'retries': {u'search': 0, u'bulk': 0},
            'timed_out': False,
            'deleted': index_size
        }

    def mock_search(self, body=None, index=None, params=None):
        """Searches an index using the given query body and pagination
        params.

        Args:
            body: dict. The Query DSL search definition; only
                bool filter/must clauses are supported by this mock.
            index: str. The name of the index to search. The special values
                '_all', '' and None are rejected.
            params: dict. Must contain exactly the keys 'from' and 'size',
                used to slice the result list.

        Returns:
            dict. A response payload mimicking the real service, with the
            matching documents under hits.hits.

        Raises:
            elasticsearch.NotFoundError. The index does not exist.
        """
        assert body is not None
        # "_all" and "" are special index names that are used to search across
        # all indexes. We do not allow their use.
        assert index not in ['_all', '', None]
        assert sorted(params.keys()) == ['from', 'size']
        if index not in self._DB:
            raise self._generate_index_not_found_error(index)
        # De-duplicate documents by id, keeping first occurrence.
        result_docs = []
        result_doc_ids = set([])
        for doc in self._DB[index]:
            if not doc['id'] in result_doc_ids:
                result_docs.append(doc)
                result_doc_ids.add(doc['id'])
        filters = body['query']['bool']['filter']
        terms = body['query']['bool']['must']
        # Apply each match filter: keep docs whose field value is in the
        # filter's value collection.
        for f in filters:
            for k, v in f['match'].items():
                result_docs = [doc for doc in result_docs if doc[k] in v]
        if terms:
            # Keep docs containing every query word among the words of their
            # string-valued fields.
            filtered_docs = []
            for term in terms:
                for _, v in term.items():
                    values = v['query'].split(' ')
                    for doc in result_docs:
                        strs = [val for val in doc.values() if isinstance(
                            val, python_utils.BASESTRING)]
                        words = []
                        for s in strs:
                            words += s.split(' ')
                        if all([value in words for value in values]):
                            filtered_docs.append(doc)
            result_docs = filtered_docs
        formatted_result_docs = [{
            '_id': doc['id'],
            '_score': 0.0,
            '_type': '_doc',
            '_index': index,
            '_source': doc
        } for doc in result_docs[
            params['from']: params['from'] + params['size']
        ]]
        return {
            'timed_out': False,
            '_shards': {
                'failed': 0,
                'total': 1,
                'successful': 1,
                'skipped': 0
            },
            'took': 4,
            'hits': {
                'hits': formatted_result_docs
            },
            'total': {
                'value': len(formatted_result_docs),
                'relation': 'eq'
            },
            'max_score': max(
                [0.0] + [d['_score'] for d in formatted_result_docs]),
        }
class AuthServicesStub(python_utils.OBJECT):
    """Test-only implementation of the public API of core.platform.auth."""

    def __init__(self):
        """Initializes a new instance that emulates an empty auth server."""
        # Maps auth_id -> user_id for every installed association.
        self._user_id_by_auth_id = {}
        # The set of user ids that still have an external association.
        self._external_user_id_associations = set()

    @classmethod
    def install_stub(cls, test):
        """Installs a new instance of the stub onto the given test instance.

        Args:
            test: GenericTestBase. The test instance to install the stub on.

        Returns:
            callable. A function that will uninstall the stub when called.
        """
        with contextlib2.ExitStack() as stack:
            stub = cls()
            stack.enter_context(test.swap(
                platform_auth_services, 'establish_auth_session',
                stub.establish_auth_session))
            stack.enter_context(test.swap(
                platform_auth_services, 'destroy_auth_session',
                stub.destroy_auth_session))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_auth_claims_from_request',
                stub.get_auth_claims_from_request))
            stack.enter_context(test.swap(
                platform_auth_services, 'mark_user_for_deletion',
                stub.mark_user_for_deletion))
            stack.enter_context(test.swap(
                platform_auth_services, 'delete_external_auth_associations',
                stub.delete_external_auth_associations))
            stack.enter_context(test.swap(
                platform_auth_services,
                'verify_external_auth_associations_are_deleted',
                stub.verify_external_auth_associations_are_deleted))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_auth_id_from_user_id',
                stub.get_auth_id_from_user_id))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_user_id_from_auth_id',
                stub.get_user_id_from_auth_id))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_multi_user_ids_from_auth_ids',
                stub.get_multi_user_ids_from_auth_ids))
            stack.enter_context(test.swap(
                platform_auth_services, 'get_multi_auth_ids_from_user_ids',
                stub.get_multi_auth_ids_from_user_ids))
            stack.enter_context(test.swap(
                platform_auth_services, 'associate_auth_id_with_user_id',
                stub.associate_auth_id_with_user_id))
            stack.enter_context(test.swap(
                platform_auth_services,
                'associate_multi_auth_ids_with_user_ids',
                stub.associate_multi_auth_ids_with_user_ids))
            # Standard usage of ExitStack: enter a bunch of context managers
            # from the safety of an ExitStack's context. Once they've all been
            # opened, pop_all() of them off of the original context so they can
            # *stay* open. Calling the function returned will exit all of them
            # in reverse order.
            # https://docs.python.org/3/library/contextlib.html#cleaning-up-in-an-enter-implementation
            return stack.pop_all().close

    @classmethod
    def establish_auth_session(cls, unused_request, unused_response):
        """No-op; the stub does not manage real sessions."""
        pass

    @classmethod
    def destroy_auth_session(cls, unused_response):
        """No-op; the stub does not manage real sessions."""
        pass

    @classmethod
    def get_auth_claims_from_request(cls, unused_request):
        """Builds auth claims from the USER_ID/USER_EMAIL/USER_IS_ADMIN
        environment variables set up by the test environment.

        Returns:
            AuthClaims|None. The claims, or None when no user is logged in.
        """
        auth_id = os.environ.get('USER_ID', '')
        email = os.environ.get('USER_EMAIL', '')
        role_is_super_admin = os.environ.get('USER_IS_ADMIN', '0') == '1'
        if auth_id:
            return auth_domain.AuthClaims(auth_id, email, role_is_super_admin)
        return None

    def mark_user_for_deletion(self, user_id):
        """Removes every auth association pointing at the given user id.

        Args:
            user_id: str. The user id to mark for deletion.
        """
        self._user_id_by_auth_id = {
            a: u for a, u in self._user_id_by_auth_id.items() if u != user_id
        }

    def delete_external_auth_associations(self, user_id):
        """Deletes the external association of the given user id, if any.

        Args:
            user_id: str. The user id whose association should be deleted.
        """
        self._external_user_id_associations.discard(user_id)

    def verify_external_auth_associations_are_deleted(self, user_id):
        """Returns whether the given user id has no external association.

        Args:
            user_id: str. The user id to check.

        Returns:
            bool. Whether the external associations are deleted.
        """
        return user_id not in self._external_user_id_associations

    def get_auth_id_from_user_id(self, user_id):
        """Returns the auth id associated with the given user id, or None.

        Args:
            user_id: str. The user id to look up.

        Returns:
            str|None. The associated auth id, if any.
        """
        return python_utils.NEXT(
            (a for a, u in self._user_id_by_auth_id.items() if u == user_id),
            None)

    def get_user_id_from_auth_id(self, auth_id):
        """Returns the user id associated with the given auth id, or None.

        Args:
            auth_id: str. The auth id to look up.

        Returns:
            str|None. The associated user id, if any.
        """
        return self._user_id_by_auth_id.get(auth_id, None)

    def get_multi_user_ids_from_auth_ids(self, auth_ids):
        """Returns the user ids associated with the given auth ids.

        Args:
            auth_ids: list(str). The auth ids to look up.

        Returns:
            list(str|None). The associated user ids, with None for auth ids
            that have no association.
        """
        return [self._user_id_by_auth_id.get(a, None) for a in auth_ids]

    def get_multi_auth_ids_from_user_ids(self, user_ids):
        """Returns the auth ids associated with the given user ids.

        Args:
            user_ids: list(str). The user ids to look up.

        Returns:
            list(str|None). The associated auth ids, with None for user ids
            that have no association.
        """
        auth_id_by_user_id = {u: a for a, u in self._user_id_by_auth_id.items()}
        return [auth_id_by_user_id.get(u, None) for u in user_ids]

    def associate_auth_id_with_user_id(self, auth_id_user_id_pair):
        """Associates the given auth id and user id with each other.

        Args:
            auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The pair to
                associate.

        Raises:
            Exception. The auth id is already associated with a user id.
        """
        auth_id, user_id = auth_id_user_id_pair
        if auth_id in self._user_id_by_auth_id:
            raise Exception(
                'auth_id=%r is already associated with user_id=%r' % (
                    auth_id, self._user_id_by_auth_id[auth_id]))
        auth_models.UserAuthDetailsModel(
            id=user_id, firebase_auth_id=auth_id).put()
        self._external_user_id_associations.add(user_id)
        self._user_id_by_auth_id[auth_id] = user_id

    def associate_multi_auth_ids_with_user_ids(self, auth_id_user_id_pairs):
        """Associates each given auth id and user id pair with each other.

        Args:
            auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
                pairs to associate.

        Raises:
            Exception. One or more auth ids are already associated.
        """
        collisions = ', '.join(
            '{auth_id=%r: user_id=%r}' % (a, self._user_id_by_auth_id[a])
            for a, _ in auth_id_user_id_pairs if a in self._user_id_by_auth_id)
        if collisions:
            raise Exception('already associated: %s' % collisions)
        datastore_services.put_multi(
            [auth_models.UserAuthDetailsModel(
                id=user_id, firebase_auth_id=auth_id)
             for auth_id, user_id in auth_id_user_id_pairs])
        # BUG FIX: This previously called set.add() with a generator, which
        # stored the generator object itself instead of the individual user
        # ids; set.update() adds each user id to the set.
        self._external_user_id_associations.update(
            u for _, u in auth_id_user_id_pairs)
        self._user_id_by_auth_id.update(auth_id_user_id_pairs)
class TaskqueueServicesStub(python_utils.OBJECT):
    """Stub that replaces the platform taskqueue services API with an
    in-process cloud tasks emulator.
    """

    def __init__(self, test_base):
        """Initializes the stub with a non-automatic task emulator.

        Args:
            test_base: TestBase. The test instance used to make the task's
                POST requests.
        """
        self._test_base = test_base
        self._client = cloud_tasks_emulator.Emulator(
            task_handler=self._task_handler, automatic_task_handling=False)

    def _task_handler(self, url, payload, queue_name, task_name=None):
        """Executes a queued task by POSTing its payload to the test app.

        Args:
            url: str. URL of the task handler.
            payload: dict|None. Payload to pass to the request.
            queue_name: str. The name of the queue the task came from.
            task_name: str|None. Optional name of the task.
        """
        headers = {
            'X-Appengine-QueueName': python_utils.convert_to_bytes(queue_name),
            'X-Appengine-TaskName': (
                # Maps empty strings to None so the output can become 'None'.
                python_utils.convert_to_bytes(task_name or None)),
            'X-AppEngine-Fake-Is-Admin': python_utils.convert_to_bytes(1),
        }
        csrf_token = self._test_base.get_new_csrf_token()
        self._test_base.post_task(url, payload, headers, csrf_token=csrf_token)

    def create_http_task(
            self, queue_name, url, payload=None, scheduled_for=None,
            task_name=None):
        """Creates a task and adds it to the emulator's queue.

        Args:
            queue_name: str. The name of the queue to add the task to.
            url: str. URL of the task handler.
            payload: dict|None. Payload to pass to the request.
            scheduled_for: datetime|None. Ignored; see note below.
            task_name: str|None. Optional name of the task.
        """
        # Causes the task to execute immediately by setting the scheduled_for
        # time to 0. If we allow scheduled_for to be non-zero, then tests that
        # rely on the actions made by the task will become unreliable.
        scheduled_for = 0
        self._client.create_task(
            queue_name, url, payload, scheduled_for=scheduled_for,
            task_name=task_name)

    def count_jobs_in_taskqueue(self, queue_name=None):
        """Returns the number of tasks in the given queue, or in all queues
        when queue_name is None.

        Args:
            queue_name: str|None. The queue to count tasks in.

        Returns:
            int. The number of tasks.
        """
        return self._client.get_number_of_tasks(queue_name=queue_name)

    def process_and_flush_tasks(self, queue_name=None):
        """Executes and removes all tasks in the given queue, or in all
        queues when queue_name is None.

        Args:
            queue_name: str|None. The queue to process.
        """
        self._client.process_and_flush_tasks(queue_name=queue_name)

    def get_pending_tasks(self, queue_name=None):
        """Returns the pending tasks in the given queue, or in all queues
        when queue_name is None.

        Args:
            queue_name: str|None. The queue to read tasks from.

        Returns:
            list(Task). The pending tasks.
        """
        return self._client.get_tasks(queue_name=queue_name)
class MemoryCacheServicesStub(python_utils.OBJECT):
    """In-memory stand-in for the platform memory cache services API.

    Entries live in the class-level _CACHE_DICT, so they are shared by all
    instances until flush_cache() is called.
    """

    _CACHE_DICT = {}

    def get_memory_cache_stats(self):
        """Returns stats for the cache; only the key count is tracked, the
        memory figures are reported as zero.

        Returns:
            MemoryCacheStats. The cache statistics.
        """
        return caching_domain.MemoryCacheStats(0, 0, len(self._CACHE_DICT))

    def flush_cache(self):
        """Removes every entry from the cache."""
        self._CACHE_DICT.clear()

    def get_multi(self, keys):
        """Returns the cached value for each key, or None when absent.

        Args:
            keys: list(str). The keys to look up.

        Returns:
            list(*|None). The cached values, in the same order as keys.
        """
        assert isinstance(keys, list)
        return [self._CACHE_DICT.get(key) for key in keys]

    def set_multi(self, key_value_mapping):
        """Stores every key/value pair from the mapping in the cache.

        Args:
            key_value_mapping: dict(str, *). The entries to store.

        Returns:
            bool. Always True.
        """
        assert isinstance(key_value_mapping, dict)
        for cache_key, cache_value in key_value_mapping.items():
            self._CACHE_DICT[cache_key] = cache_value
        return True

    def delete_multi(self, keys):
        """Deletes the given keys from the cache, ignoring missing ones.

        Args:
            keys: list(str). The keys to delete.

        Returns:
            int. The number of keys that were actually deleted.
        """
        assert all(isinstance(key, python_utils.BASESTRING) for key in keys)
        deleted_count = 0
        for cache_key in keys:
            if cache_key in self._CACHE_DICT:
                del self._CACHE_DICT[cache_key]
                deleted_count += 1
        return deleted_count
class TestBase(unittest.TestCase):
    """Base class for all tests."""

    maxDiff = 2500

    # A test unicode string.
    UNICODE_TEST_STRING = 'unicode ¡马!'

    def _get_unicode_test_string(self, suffix):
        """Returns UNICODE_TEST_STRING with the given suffix appended.

        Args:
            suffix: str. The suffix to append.

        Returns:
            str. The concatenated string.
        """
        return '%s%s' % (self.UNICODE_TEST_STRING, suffix)

    def _assert_validation_error(self, item, error_substring):
        """Checks that calling item.validate() raises a ValidationError whose
        message matches the given substring.

        Args:
            item: *. An object with a validate() method.
            error_substring: str. Regexp the error message must match.
        """
        with self.assertRaisesRegexp(utils.ValidationError, error_substring):
            item.validate()

    def log_line(self, line):
        """Prints the line, prefixed so that calling scripts can identify
        test log output.

        Args:
            line: *. The value to log.
        """
        # We are using the b' prefix as all the stdouts are in bytes.
        python_utils.PRINT(
            b'%s%s' % (LOG_LINE_PREFIX, python_utils.convert_to_bytes(line)))

    def shortDescription(self):
        """Additional information logged during unit test invocation.

        Returns:
            None. Suppresses the default logging of docstrings.
        """
        return None

    def get_updated_param_dict(
            self, param_dict, param_changes, exp_param_specs):
        """Updates a param dict using the given list of param_changes.

        Args:
            param_dict: dict. The parameter dict to update.
            param_changes: list(*). Objects with a name attribute and a
                get_normalized_value() method.
            exp_param_specs: dict. Maps parameter names to spec objects that
                expose an obj_type attribute.

        Returns:
            dict. A new dict with the changes applied (the input dict is not
            mutated).

        Raises:
            Exception. A param change refers to a name missing from
                exp_param_specs.
        """
        new_param_dict = copy.deepcopy(param_dict)
        for param_change in param_changes:
            try:
                obj_type = exp_param_specs[param_change.name].obj_type
            # BUG FIX: This was a bare 'except:', which also swallows
            # system-exiting exceptions such as KeyboardInterrupt; catching
            # Exception preserves the intended behavior without doing that.
            except Exception:
                raise Exception('Parameter %s not found' % param_change.name)
            new_param_dict[param_change.name] = (
                param_change.get_normalized_value(obj_type, new_param_dict))
        return new_param_dict

    def get_static_asset_filepath(self):
        """Returns the filepath prefix for static assets.

        Returns:
            str. Empty in DEV_MODE, otherwise the build directory.
        """
        return '' if constants.DEV_MODE else os.path.join('build')

    def get_static_asset_url(self, asset_suffix):
        """Returns the relative URL path for the given static asset suffix.

        Args:
            asset_suffix: str. The asset path, starting with '/'.

        Returns:
            str. The full asset URL path.
        """
        return '/assets%s%s' % (utils.get_asset_dir_prefix(), asset_suffix)

    @contextlib.contextmanager
    def capture_logging(self, min_level=logging.NOTSET):
        """Context manager that captures log messages emitted while it is
        active.

        Args:
            min_level: int. The minimum logging level to capture.

        Yields:
            list(str). The captured log messages; grows as messages arrive.
        """
        captured_logs = []

        class ListStream(python_utils.OBJECT):
            """Stream-like object that appends written messages to a list."""

            def write(self, msg):
                """Appends the stripped message to the captured list."""
                captured_logs.append(msg.strip())

            def flush(self):
                """No-op; nothing is buffered."""
                pass

        list_stream_handler = logging.StreamHandler(stream=ListStream())
        logger = logging.getLogger()
        old_level = logger.level
        logger.addHandler(list_stream_handler)
        logger.setLevel(min_level)
        try:
            yield captured_logs
        finally:
            logger.setLevel(old_level)
            logger.removeHandler(list_stream_handler)

    @contextlib.contextmanager
    def swap(self, obj, attr, newvalue):
        """Swaps an object's attribute value within the context, restoring
        the original value on exit.

        Args:
            obj: *. The object whose attribute is swapped.
            attr: str. The attribute name.
            newvalue: *. The temporary value.
        """
        original = getattr(obj, attr)
        setattr(obj, attr, newvalue)
        try:
            yield
        finally:
            setattr(obj, attr, original)

    @contextlib.contextmanager
    def swap_to_always_return(self, obj, attr, value=None):
        """Swaps obj.attr with a function that accepts anything and always
        returns the given value.

        Args:
            obj: *. The object whose attribute is swapped.
            attr: str. The attribute name.
            value: *. The value the replacement function returns.
        """
        def function_that_always_returns(*unused_args, **unused_kwargs):
            """Returns the fixed value regardless of arguments."""
            return value
        with self.swap(obj, attr, function_that_always_returns):
            yield

    @contextlib.contextmanager
    def swap_to_always_raise(self, obj, attr, error=Exception):
        """Swaps obj.attr with a function that accepts anything and always
        raises the given error.

        Args:
            obj: *. The object whose attribute is swapped.
            attr: str. The attribute name.
            error: Exception. The error the replacement function raises.
        """
        def function_that_always_raises(*unused_args, **unused_kwargs):
            """Raises the fixed error regardless of arguments."""
            raise error
        with self.swap(obj, attr, function_that_always_raises):
            yield

    @contextlib.contextmanager
    def swap_with_checks(
            self, obj, attr, new_value, expected_args=None,
            expected_kwargs=None, called=True):
        """Swaps obj.attr with new_value and, on exit, verifies that it was
        (or was not) called and that each call received the expected
        positional and keyword arguments.

        Args:
            obj: *. The object whose attribute is swapped.
            attr: str. The attribute name.
            new_value: callable. The replacement to install.
            expected_args: list(tuple)|None. Expected positional args, one
                tuple per expected call, consumed in order.
            expected_kwargs: list(dict)|None. Expected keyword args, one dict
                per expected call, consumed in order.
            called: bool. Whether the replacement is expected to be called.
        """
        original = getattr(obj, attr)
        msg = 'Expected checks failed when swapping out in %s.%s tests.' % (
            obj.__name__, attr)

        def wrapper(*args, **kwargs):
            """Records the call, checks its arguments, then delegates."""
            wrapper.called = True
            if expected_args is not None:
                self.assertEqual(args, expected_args[0], msg=msg)
                expected_args.pop(0)
            if expected_kwargs is not None:
                self.assertEqual(kwargs, expected_kwargs[0], msg=msg)
                expected_kwargs.pop(0)
            result = new_value(*args, **kwargs)
            return result

        wrapper.called = False
        setattr(obj, attr, wrapper)
        error_occurred = False
        try:
            # Custom messages are only shown with longMessage enabled.
            self.longMessage = True
            yield
        except Exception:
            error_occurred = True
            # Raise issues thrown by the called function or assert checks.
            raise
        finally:
            setattr(obj, attr, original)
            # The checks below would be misleading (and noisy) if the body
            # already failed, so they only run on the success path.
            if not error_occurred:
                self.assertEqual(wrapper.called, called, msg=msg)
                self.assertFalse(expected_args, msg=msg)
                self.assertFalse(expected_kwargs, msg=msg)
            self.longMessage = False

    def assertRaises(self, *args, **kwargs):
        """Disabled in these tests; use assertRaisesRegexp instead so the
        error message is always checked.

        Raises:
            NotImplementedError. Always.
        """
        raise NotImplementedError(
            'self.assertRaises should not be used in these tests. Please use '
            'self.assertRaisesRegexp instead.')

    def assertRaisesRegexp(
            self, expected_exception, expected_regexp, callable_obj=None,
            *args, **kwargs):
        """Same as unittest's assertRaisesRegexp, but rejects an empty
        regexp so tests always validate the raised error's message.

        Raises:
            Exception. The provided regexp is empty.
        """
        if not expected_regexp:
            raise Exception(
                'Please provide a sufficiently strong regexp string to '
                'validate that the correct error is being raised.')
        return super(TestBase, self).assertRaisesRegexp(
            expected_exception, expected_regexp,
            callable_obj=callable_obj, *args, **kwargs)

    def assert_matches_regexps(self, items, regexps, full_match=False):
        """Asserts that each item matches the regexp at the same index, and
        that the two lists have the same length.

        Args:
            items: list(str). The strings to check.
            regexps: list(str). The regexps each corresponding item must
                match.
            full_match: bool. Whether to anchor matches at the start
                (re.match) instead of searching anywhere (re.search).

        Raises:
            AssertionError. The lists differ in length or an item fails to
                match.
        """
        get_match = re.match if full_match else re.search
        differences = [
            '~ [i=%d]:\t%r does not match: %r' % (i, item, regexp)
            for i, (regexp, item) in enumerate(python_utils.ZIP(regexps, items))
            if get_match(regexp, item, re.DOTALL) is None
        ]
        if len(items) < len(regexps):
            extra_regexps = regexps[len(items):]
            differences.extend(
                '- [i=%d]:\tmissing item expected to match: %r' % (i, regexp)
                for i, regexp in enumerate(extra_regexps, start=len(items)))
        if len(regexps) < len(items):
            extra_items = items[len(regexps):]
            differences.extend(
                '+ [i=%d]:\textra item %r' % (i, item)
                for i, item in enumerate(extra_items, start=len(regexps)))
        if differences:
            error_message = 'Lists differ:\n\t%s' % '\n\t'.join(differences)
            raise AssertionError(error_message)
class AppEngineTestBase(TestBase):
AUTH_DOMAIN = 'example.com'
HTTP_HOST = 'localhost'
SERVER_NAME = 'localhost'
SERVER_PORT = '8080'
DEFAULT_VERSION_HOSTNAME = '%s:%s' % (HTTP_HOST, SERVER_PORT)
def __init__(self, *args, **kwargs):
super(AppEngineTestBase, self).__init__(*args, **kwargs)
self._platform_taskqueue_services_stub = TaskqueueServicesStub(self)
def setUp(self):
super(AppEngineTestBase, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(
overwrite=True,
auth_domain=self.AUTH_DOMAIN, http_host=self.HTTP_HOST,
server_name=self.SERVER_NAME, server_port=self.SERVER_PORT,
default_version_hostname=self.DEFAULT_VERSION_HOSTNAME)
# Google App Engine service stubs.
self.testbed.init_app_identity_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_files_stub()
self.testbed.init_memcache_stub()
self.testbed.init_search_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_user_stub()
policy = (
datastore_services.make_instantaneous_global_consistency_policy())
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
# The root path tells the testbed where to find the queue.yaml file.
self.testbed.init_taskqueue_stub(root_path=os.getcwd())
self._testbed_taskqueue_stub = (
self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME))
# Set up apps for testing.
self.testapp = webtest.TestApp(main.app)
self.taskqueue_testapp = webtest.TestApp(main_taskqueue.app)
self.mail_testapp = webtest.TestApp(main_mail.app)
def tearDown(self):
self.testbed.deactivate()
super(AppEngineTestBase, self).tearDown()
def run(self, result=None):
platform_taskqueue_services_swap = self.swap(
platform_taskqueue_services, 'create_http_task',
self._platform_taskqueue_services_stub.create_http_task)
with platform_taskqueue_services_swap:
super(AppEngineTestBase, self).run(result=result)
def _get_all_queue_names(self):
return [q['name'] for q in self._testbed_taskqueue_stub.GetQueues()]
def count_jobs_in_taskqueue(self, queue_name):
return self._platform_taskqueue_services_stub.count_jobs_in_taskqueue(
queue_name=queue_name)
def process_and_flush_pending_tasks(self, queue_name=None):
self._platform_taskqueue_services_stub.process_and_flush_tasks(
queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
return self._platform_taskqueue_services_stub.get_pending_tasks(
queue_name=queue_name)
def count_jobs_in_mapreduce_taskqueue(self, queue_name):
return len(self.get_pending_mapreduce_tasks(queue_name=queue_name))
def get_pending_mapreduce_tasks(self, queue_name=None):
queue_names = None if queue_name is None else [queue_name]
return self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
    def _execute_mapreduce_tasks(self, tasks):
        """Executes the given queued tasks synchronously against the test apps.

        Args:
            tasks: list. The queued task objects to execute; each carries a
                url, payload and headers.

        Raises:
            RuntimeError. A task's HTTP response status was not 200.
        """
        for task in tasks:
            if task.url == '/_ah/queue/deferred':
                # Deferred-queue tasks carry their callable in the payload;
                # deferred.run executes it directly.
                deferred.run(task.payload)
            else:
                # All other tasks will be for MapReduce or taskqueue.
                params = task.payload or ''
                headers = {
                    'Content-Length': python_utils.convert_to_bytes(len(params))
                }
                headers.update(
                    (key, python_utils.convert_to_bytes(val))
                    for key, val in task.headers.items())
                # URLs under /task are served by the taskqueue app; everything
                # else goes to the main app.
                app = (
                    self.taskqueue_testapp if task.url.startswith('/task') else
                    self.testapp)
                response = app.post(
                    task.url, params=params, headers=headers,
                    expect_errors=True)
                if response.status_code != 200:
                    raise RuntimeError('MapReduce task failed: %r' % task)
    def process_and_flush_pending_mapreduce_tasks(self, queue_name=None):
        """Runs and flushes pending MapReduce tasks until none remain.

        Args:
            queue_name: str | None. The queue to process, or None to process
                every queue.
        """
        queue_names = (
            self._get_all_queue_names() if queue_name is None else [queue_name])

        get_enqueued_tasks = lambda: list(
            self._testbed_taskqueue_stub.get_filtered_tasks(
                queue_names=queue_names))

        # Loops until get_enqueued_tasks() returns an empty list; executing a
        # batch of tasks may enqueue new ones, so each pass flushes the queues
        # before running the snapshot it took.
        for tasks in iter(get_enqueued_tasks, []):
            for queue in queue_names:
                self._testbed_taskqueue_stub.FlushQueue(queue)
            self._execute_mapreduce_tasks(tasks)
def run_but_do_not_flush_pending_mapreduce_tasks(self):
queue_names = self._get_all_queue_names()
tasks = self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
class GenericTestBase(AppEngineTestBase):
    """Base class providing common signup, login and domain-object creation
    helpers for backend tests.
    """

    # NOTE: For tests that do not/can not use the default super-admin, authors
    # can override the following class-level constant.
    AUTO_CREATE_DEFAULT_SUPERADMIN_USER = True

    # This is the value that gets returned by default when
    # app_identity.get_application_id() is called during tests.
    EXPECTED_TEST_APP_ID = 'dummy-cloudsdk-project-id'

    SUPER_ADMIN_EMAIL = 'tmpsuperadmin@example.com'
    SUPER_ADMIN_USERNAME = 'tmpsuperadm1n'

    # Dummy strings representing user attributes. Note that it is up to the
    # individual test to actually register these users as editors, admins, etc.
    ADMIN_EMAIL = 'admin@example.com'
    # Usernames containing the string 'admin' are reserved, so we use 'adm'
    # instead.
    ADMIN_USERNAME = 'adm'
    MODERATOR_EMAIL = 'moderator@example.com'
    MODERATOR_USERNAME = 'moderator'
    OWNER_EMAIL = 'owner@example.com'
    OWNER_USERNAME = 'owner'
    EDITOR_EMAIL = 'editor@example.com'
    EDITOR_USERNAME = 'editor'
    TOPIC_MANAGER_EMAIL = 'topicmanager@example.com'
    TOPIC_MANAGER_USERNAME = 'topicmanager'
    VOICE_ARTIST_EMAIL = 'voiceartist@example.com'
    VOICE_ARTIST_USERNAME = 'voiceartist'
    VIEWER_EMAIL = 'viewer@example.com'
    VIEWER_USERNAME = 'viewer'
    NEW_USER_EMAIL = 'new.user@example.com'
    NEW_USER_USERNAME = 'newuser'
    DEFAULT_END_STATE_NAME = 'End'

    PSEUDONYMOUS_ID = 'pid_%s' % ('a' * 32)

    # A version-0 exploration states dict, used by tests that exercise the
    # migration of explorations stored under old states schema versions.
    VERSION_0_STATES_DICT = {
        feconf.DEFAULT_INIT_STATE_NAME: {
            'content': [{'type': 'text', 'value': ''}],
            'param_changes': [],
            'interaction': {
                'customization_args': {},
                'id': 'Continue',
                'handlers': [{
                    'name': 'submit',
                    'rule_specs': [{
                        'dest': 'END',
                        'feedback': [],
                        'param_changes': [],
                        'definition': {'rule_type': 'default'},
                    }],
                }],
            },
        },
    }
VERSION_27_STATE_DICT = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
},
'interaction': {
'solution': {
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>Solution explanation</p>',
},
'answer_is_exclusive': False,
},
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': '',
},
'dest': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': True,
},
'customization_args': {
'rows': {'value': 1},
'placeholder': {'value': 'Enter text here'},
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hint 1</p>',
},
}],
},
'classifier_model_id': None,
}
VERSION_21_STATE_DICT = {
'END': {
'classifier_model_id': None,
'content': {
'content_id': 'content',
'html': 'Congratulations, you have finished!',
},
'content_ids_to_audio_translations': {'content': {}},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {
'recommendedExplorationIds': {'value': []},
},
'default_outcome': None,
'hints': [],
'id': 'EndExploration',
'solution': None,
},
'param_changes': [],
},
'Introduction': {
'classifier_model_id': None,
'content': {'content_id': 'content', 'html': ''},
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'feedback_1': {},
},
'interaction': {
'answer_groups': [{
'outcome': {
'dest': 'END',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Correct!</p>',
},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'rule_specs': [{
'inputs': {'x': 'InputString'},
'rule_type': 'Equals',
}],
'tagged_misconception_id': None,
'training_data': ['answer1', 'answer2', 'answer3'],
}],
'confirmed_unclassified_answers': [],
'customization_args': {
'placeholder': {'value': ''},
'rows': {'value': 1},
},
'default_outcome': {
'dest': 'Introduction',
'feedback': {'content_id': 'default_outcome', 'html': ''},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'hints': [],
'id': 'TextInput',
'solution': None,
},
'param_changes': [],
},
}
VERSION_1_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_2_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_3_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_4_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math math_content-with-value="{'
'&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '
'&quot;svg_filename&quot;: &quot;&quot;'
'}">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_1_SUBTOPIC_DICT = {
'skill_ids': ['skill_1'],
'id': 1,
'title': 'A subtitle',
}
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with YAML generation tests. The
# indentations are also important, since it is used to define nesting (just
# like Python).
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
    def run(self, result=None):
        """Run the test, collecting the result into the given TestResult.

        Before the test body executes, the auth, Elasticsearch and memory
        cache services are replaced by in-memory stubs so no external service
        is contacted; the ExitStack undoes every swap when the test finishes.

        Args:
            result: TestResult | None. Holds the results of the test. If None,
                unittest supplies a default result object.
        """
        memory_cache_services_stub = MemoryCacheServicesStub()
        memory_cache_services_stub.flush_cache()
        es_stub = ElasticSearchStub()
        es_stub.reset()

        with contextlib2.ExitStack() as stack:
            # install_stub(self) returns the cleanup callable that uninstalls
            # the auth stub; register it to run when the stack unwinds.
            stack.callback(AuthServicesStub.install_stub(self))
            stack.enter_context(self.swap(
                elastic_search_services.ES.indices, 'create',
                es_stub.mock_create_index))
            stack.enter_context(self.swap(
                elastic_search_services.ES, 'index',
                es_stub.mock_index))
            stack.enter_context(self.swap(
                elastic_search_services.ES, 'exists',
                es_stub.mock_exists))
            stack.enter_context(self.swap(
                elastic_search_services.ES, 'delete',
                es_stub.mock_delete))
            stack.enter_context(self.swap(
                elastic_search_services.ES, 'delete_by_query',
                es_stub.mock_delete_by_query))
            stack.enter_context(self.swap(
                elastic_search_services.ES, 'search',
                es_stub.mock_search))
            stack.enter_context(self.swap(
                memory_cache_services, 'flush_cache',
                memory_cache_services_stub.flush_cache))
            stack.enter_context(self.swap(
                memory_cache_services, 'get_multi',
                memory_cache_services_stub.get_multi))
            stack.enter_context(self.swap(
                memory_cache_services, 'set_multi',
                memory_cache_services_stub.set_multi))
            stack.enter_context(self.swap(
                memory_cache_services, 'get_memory_cache_stats',
                memory_cache_services_stub.get_memory_cache_stats))
            stack.enter_context(self.swap(
                memory_cache_services, 'delete_multi',
                memory_cache_services_stub.delete_multi))

            super(GenericTestBase, self).run(result=result)
def setUp(self):
super(GenericTestBase, self).setUp()
if self.AUTO_CREATE_DEFAULT_SUPERADMIN_USER:
self.signup_superadmin_user()
def tearDown(self):
datastore_services.delete_multi(
datastore_services.query_everything().iter(keys_only=True))
super(GenericTestBase, self).tearDown()
def login(self, email, is_super_admin=False):
self.testbed.setup_env(
overwrite=True,
user_email=email, user_id=self.get_auth_id_from_email(email),
user_is_admin=('1' if is_super_admin else '0'))
def logout(self):
self.testbed.setup_env(
overwrite=True, user_email='', user_id='', user_is_admin='0')
    @contextlib.contextmanager
    def mock_datetime_utcnow(self, mocked_datetime):
        """Mocks the current UTC datetime seen by the datastore while the
        context is active.

        Args:
            mocked_datetime: datetime.datetime. The datetime to report instead
                of the real current UTC datetime.

        Yields:
            None. Nothing; the swap is undone when the context exits.
        """
        with datastore_services.mock_datetime_for_datastore(mocked_datetime):
            yield
@contextlib.contextmanager
def login_context(self, email, is_super_admin=False):
self.login(email, is_super_admin=is_super_admin)
try:
yield self.get_user_id_from_email(email)
finally:
self.logout()
@contextlib.contextmanager
def super_admin_context(self):
email = self.SUPER_ADMIN_EMAIL
with self.login_context(email, is_super_admin=True) as user_id:
yield user_id
    def signup(self, email, username):
        """Completes the signup process for the user with the given username.

        Args:
            email: str. Email of the given user.
            username: str. Username of the given user.
        """
        user_services.create_new_user(self.get_auth_id_from_email(email), email)
        with self.login_context(email), requests_mock.Mocker() as m:
            # We mock out all HTTP requests while trying to signup to avoid
            # calling out to real backend services.
            m.request(requests_mock.ANY, requests_mock.ANY)

            response = self.get_html_response(feconf.SIGNUP_URL)
            self.assertEqual(response.status_int, 200)

            response = self.testapp.post(feconf.SIGNUP_DATA_URL, params={
                'csrf_token': self.get_new_csrf_token(),
                'payload': json.dumps(
                    {'username': username, 'agreed_to_terms': True}),
            })
            self.assertEqual(response.status_int, 200)

    def signup_superadmin_user(self):
        """Signs up the default super-admin account."""
        self.signup(self.SUPER_ADMIN_EMAIL, self.SUPER_ADMIN_USERNAME)
def set_config_property(self, config_obj, new_config_value):
with self.super_admin_context():
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_obj.name: new_config_value,
},
}, csrf_token=self.get_new_csrf_token())
def set_user_role(self, username, user_role):
with self.super_admin_context():
self.post_json('/adminrolehandler', {
'username': username,
'role': user_role,
}, csrf_token=self.get_new_csrf_token())
def set_admins(self, admin_usernames):
for name in admin_usernames:
self.set_user_role(name, feconf.ROLE_ID_ADMIN)
def set_topic_managers(self, topic_manager_usernames):
for name in topic_manager_usernames:
self.set_user_role(name, feconf.ROLE_ID_TOPIC_MANAGER)
def set_moderators(self, moderator_usernames):
for name in moderator_usernames:
self.set_user_role(name, feconf.ROLE_ID_MODERATOR)
def set_banned_users(self, banned_usernames):
for name in banned_usernames:
self.set_user_role(name, feconf.ROLE_ID_BANNED_USER)
def set_collection_editors(self, collection_editor_usernames):
for name in collection_editor_usernames:
self.set_user_role(name, feconf.ROLE_ID_COLLECTION_EDITOR)
    def get_user_id_from_email(self, email):
        """Gets the user ID corresponding to the given email.

        Args:
            email: str. A valid email address.

        Returns:
            str|None. The id of the user with the given email, or None if no
            such user exists.
        """
        user_settings = user_services.get_user_settings_by_auth_id(
            self.get_auth_id_from_email(email))
        return user_settings and user_settings.user_id

    @classmethod
    def get_auth_id_from_email(cls, email):
        """Returns a deterministic mock auth ID derived from the given email.

        Args:
            email: str. The email address of the user.

        Returns:
            bytes. The mock auth ID for a user with the given email.
        """
        # Although the hash function doesn't guarantee a one-to-one mapping, in
        # practice collisions are vanishingly unlikely for the small number of
        # distinct emails used within a single test, so hashing the email is a
        # sufficient way to derive a stable, unique-enough auth ID.
        return python_utils.convert_to_bytes(abs(hash(email)))
    def _get_response(
            self, url, expected_content_type, params=None,
            expected_status_int=200):
        """Gets a response from the given URL and validates its status and
        content type.

        Args:
            url: str. The URL to fetch.
            expected_content_type: str. The content type to expect.
            params: dict|None. A dictionary that will be encoded into a query
                string.
            expected_status_int: int. The integer status code to expect.

        Returns:
            webtest.TestResponse. The test response.
        """
        if params is not None:
            self.assertIsInstance(params, dict)

        # Any 4xx/5xx expectation means errors are anticipated by the caller.
        expect_errors = expected_status_int >= 400

        # Swap out the real template loader for mock_load_template while
        # fetching the page.
        with self.swap(base, 'load_template', mock_load_template):
            response = self.testapp.get(
                url, params=params, expect_errors=expect_errors,
                status=expected_status_int)

        if expect_errors:
            self.assertTrue(response.status_int >= 400)
        else:
            self.assertTrue(200 <= response.status_int < 400)

        self.assertEqual(response.status_int, expected_status_int)
        self.assertEqual(response.content_type, expected_content_type)

        return response
def get_html_response(self, url, params=None, expected_status_int=200):
return self._get_response(
url, 'text/html', params=params,
expected_status_int=expected_status_int)
def get_custom_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
self.assertNotIn(
expected_content_type, ['text/html', 'application/json'])
return self._get_response(
url, expected_content_type, params=params,
expected_status_int=expected_status_int)
    def get_response_without_checking_for_errors(
            self, url, expected_status_int_list, params=None):
        """Gets a response and checks only that its status code is one of the
        listed values, without classifying it as success or error.

        Args:
            url: str. The URL to fetch.
            expected_status_int_list: list(int). The acceptable status codes.
            params: dict|None. A dictionary that will be encoded into a query
                string.

        Returns:
            webtest.TestResponse. The test response.
        """
        if params is not None:
            self.assertIsInstance(
                params, dict,
                msg='Expected params to be a dict, received %s' % params)

        # Swap out the real template loader for mock_load_template while
        # fetching the page.
        with self.swap(base, 'load_template', mock_load_template):
            response = self.testapp.get(url, params=params, expect_errors=True)

        self.assertIn(response.status_int, expected_status_int_list)

        return response
def _parse_json_response(self, json_response, expect_errors):
if expect_errors:
self.assertTrue(json_response.status_int >= 400)
else:
self.assertTrue(200 <= json_response.status_int < 400)
self.assertEqual(json_response.content_type, 'application/json')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url, params=None, expected_status_int=200):
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
json_response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def post_json(
self, url, payload, csrf_token=None, expected_status_int=200,
upload_files=None):
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self._send_post_request(
self.testapp, url, data, expect_errors,
expected_status_int=expected_status_int, upload_files=upload_files)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def delete_json(self, url, params='', expected_status_int=200):
if params:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
expect_errors = expected_status_int >= 400
json_response = self.testapp.delete(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def _send_post_request(
self, app, url, data, expect_errors, expected_status_int=200,
upload_files=None, headers=None):
if upload_files is not None:
upload_files = tuple(
tuple(python_utils.convert_to_bytes(f) for f in upload_file)
for upload_file in upload_files)
return app.post(
url, params=data, headers=headers, status=expected_status_int,
upload_files=upload_files, expect_errors=expect_errors)
    def post_email(
            self, recipient_email, sender_email, subject, body, html_body=None,
            expect_errors=False, expected_status_int=200):
        """Posts an email to the incoming-mail handler, as if it had been
        received by App Engine.

        Args:
            recipient_email: str. The email of the recipient.
            sender_email: str. The email of the sender.
            subject: str. The subject of the email.
            body: str. The plain-text body of the email.
            html_body: str|None. The HTML body of the email, if any.
            expect_errors: bool. Whether a 4xx/5xx status is expected.
            expected_status_int: int. The integer status code to expect.

        Returns:
            webtest.TestResponse. The response from the mail handler app.
        """
        email = mail.EmailMessage(
            sender=sender_email, to=recipient_email, subject=subject, body=body)
        if html_body is not None:
            email.html = html_body

        # Serialize the message into MIME wire format, which is what the
        # /_ah/mail/* handler receives in production.
        mime_email = email.to_mime_message()
        headers = {
            'Content-Type': mime_email.get_content_type(),
        }
        data = mime_email.as_string()

        incoming_email_url = '/_ah/mail/%s' % recipient_email

        return self._send_post_request(
            self.mail_testapp, incoming_email_url, data, expect_errors,
            headers=headers, expected_status_int=expected_status_int)
def post_task(
self, url, payload, headers, csrf_token=None, expect_errors=False,
expected_status_int=200):
if csrf_token:
payload['csrf_token'] = csrf_token
return self.taskqueue_testapp.post(
url, params=json.dumps(payload), headers=headers,
status=expected_status_int, expect_errors=expect_errors,
content_type='application/json')
def put_json(self, url, payload, csrf_token=None, expected_status_int=200):
params = {'payload': json.dumps(payload)}
if csrf_token:
params['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self.testapp.put(
url, params=params, expect_errors=expect_errors)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def get_new_csrf_token(self):
response = self.get_json('/csrfhandler')
return response['token']
def save_new_default_exploration(
self, exploration_id, owner_id, title='A title'):
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category='Algebra')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def set_interaction_for_state(self, state, interaction_id):
next_content_id_index_dict = {'value': state.next_content_id_index}
def traverse_schema_and_assign_content_ids(value, schema, contentId):
is_subtitled_html_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML)
is_subtitled_unicode_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE)
if is_subtitled_html_spec or is_subtitled_unicode_spec:
value['content_id'] = '%s_%i' % (
contentId, next_content_id_index_dict['value'])
next_content_id_index_dict['value'] += 1
elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST:
for x in value:
traverse_schema_and_assign_content_ids(
x, schema['items'], contentId)
elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT:
for schema_property in schema['properties']:
traverse_schema_and_assign_content_ids(
x[schema_property.name],
schema_property['schema'],
'%s_%s' % (contentId, schema_property.name))
interaction = (
interaction_registry.Registry.get_interaction_by_id(interaction_id))
ca_specs = interaction.customization_arg_specs
customization_args = {}
for ca_spec in ca_specs:
ca_name = ca_spec.name
ca_value = ca_spec.default_value
traverse_schema_and_assign_content_ids(
ca_value, ca_spec.schema, 'ca_%s' % ca_name)
customization_args[ca_name] = {'value': ca_value}
state.update_interaction_id(interaction_id)
state.update_interaction_customization_args(customization_args)
state.update_next_content_id_index(next_content_id_index_dict['value'])
    def save_new_valid_exploration(
            self, exploration_id, owner_id, title='A title',
            category='A category', objective='An objective',
            language_code=constants.DEFAULT_LANGUAGE_CODE, end_state_name=None,
            interaction_id='TextInput', correctness_feedback_enabled=False):
        """Saves a new strictly-validated exploration.

        Args:
            exploration_id: str. The id of the new validated exploration.
            owner_id: str. The user_id of the creator of the exploration.
            title: str. The title of the exploration.
            category: str. The category this exploration belongs to.
            objective: str. The objective of this exploration.
            language_code: str. The language_code of this exploration.
            end_state_name: str|None. If given, an extra terminal state with
                this name is added and linked from the initial state.
            interaction_id: str. The id of the interaction for the initial
                state.
            correctness_feedback_enabled: bool. Whether correctness feedback
                is enabled for the exploration.

        Returns:
            Exploration. The exploration domain object.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            exploration_id, title=title, category=category,
            language_code=language_code)
        self.set_interaction_for_state(
            exploration.states[exploration.init_state_name], interaction_id)

        exploration.objective = objective
        exploration.correctness_feedback_enabled = correctness_feedback_enabled

        if end_state_name is not None:
            exploration.add_states([end_state_name])
            end_state = exploration.states[end_state_name]
            self.set_interaction_for_state(end_state, 'EndExploration')
            end_state.update_interaction_default_outcome(None)

            # Link the initial state to the ending state, so the exploration
            # passes strict validation.
            init_state = exploration.states[exploration.init_state_name]
            init_interaction = init_state.interaction
            init_interaction.default_outcome.dest = end_state_name
            if correctness_feedback_enabled:
                init_interaction.default_outcome.labelled_as_correct = True

        exp_services.save_new_exploration(owner_id, exploration)
        return exploration
    def save_new_linear_exp_with_state_names_and_interactions(
            self, exploration_id, owner_id, state_names, interaction_ids,
            title='A title', category='A category', objective='An objective',
            language_code=constants.DEFAULT_LANGUAGE_CODE):
        """Saves a new strictly-validated exploration whose states are linked
        together sequentially, ending in a terminal state.

        Args:
            exploration_id: str. The id of the new validated exploration.
            owner_id: str. The user_id of the creator of the exploration.
            state_names: list(str). The names of the states, in order. Must be
                non-empty.
            interaction_ids: list(str). The interaction ids to assign to each
                state. Values are cycled, so the list need not be the same
                length as state_names, but it must be non-empty.
            title: str. The title of the exploration.
            category: str. The category this exploration belongs to.
            objective: str. The objective of this exploration.
            language_code: str. The language_code of this exploration.

        Returns:
            Exploration. The exploration domain object.

        Raises:
            ValueError. No state names or no interaction ids are provided.
        """
        if not state_names:
            raise ValueError('must provide at least one state name')
        if not interaction_ids:
            raise ValueError('must provide at least one interaction type')
        # Cycle the interaction ids so short lists cover every state.
        interaction_ids = itertools.cycle(interaction_ids)

        exploration = exp_domain.Exploration.create_default_exploration(
            exploration_id, title=title, init_state_name=state_names[0],
            category=category, objective=objective, language_code=language_code)

        exploration.add_states(state_names[1:])
        # Link each state to its successor via the default outcome.
        for from_state_name, dest_state_name in (
                python_utils.ZIP(state_names[:-1], state_names[1:])):
            from_state = exploration.states[from_state_name]
            self.set_interaction_for_state(
                from_state, python_utils.NEXT(interaction_ids))
            from_state.interaction.default_outcome.dest = dest_state_name
        # The last state is made terminal.
        end_state = exploration.states[state_names[-1]]
        self.set_interaction_for_state(end_state, 'EndExploration')
        end_state.update_interaction_default_outcome(None)

        exp_services.save_new_exploration(owner_id, exploration)
        return exploration
    def save_new_exp_with_states_schema_v0(self, exp_id, user_id, title):
        """Saves a new exploration with a default version 0 states dict,
        bypassing the regular domain-layer save path.

        This should only be used by tests that exercise the migration of
        explorations stored under old states schema versions.

        Args:
            exp_id: str. The exploration id.
            user_id: str. The user_id of the creator.
            title: str. The title of the exploration.
        """
        exp_model = exp_models.ExplorationModel(
            id=exp_id, category='category', title=title,
            objective='Old objective', language_code='en', tags=[], blurb='',
            author_notes='', states_schema_version=0,
            init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
            states=self.VERSION_0_STATES_DICT, param_specs={}, param_changes=[])
        rights_manager.create_new_exploration_rights(exp_id, user_id)

        commit_message = 'New exploration created with title \'%s\'.' % title
        exp_model.commit(user_id, commit_message, [{
            'cmd': 'create_new',
            'title': 'title',
            'category': 'category',
        }])

        # Build the matching summary model by hand, since the regular save
        # path (which would have produced it) is being bypassed.
        exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
        exp_summary_model = exp_models.ExpSummaryModel(
            id=exp_id, title=title, category='category',
            objective='Old objective', language_code='en', tags=[],
            ratings=feconf.get_empty_ratings(),
            scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
            status=exp_rights.status,
            community_owned=exp_rights.community_owned,
            owner_ids=exp_rights.owner_ids, contributor_ids=[],
            contributors_summary={})
        exp_summary_model.put()

        stats_services.create_exp_issues_for_new_exploration(exp_id, 1)

    def save_new_exp_with_custom_states_schema_version(
            self, exp_id, user_id, states_dict, version):
        """Saves a new exploration with the given states dict stored under the
        given (old) states schema version, bypassing the regular save path.

        Args:
            exp_id: str. The exploration id.
            user_id: str. The user_id of the creator.
            states_dict: dict. The dict representation of all the states.
            version: int. The states schema version to record.
        """
        exp_model = exp_models.ExplorationModel(
            id=exp_id, category='category', title='title',
            objective='Old objective', language_code='en', tags=[], blurb='',
            author_notes='', states_schema_version=version,
            init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states=states_dict,
            param_specs={}, param_changes=[])
        rights_manager.create_new_exploration_rights(exp_id, user_id)

        commit_message = 'New exploration created with title \'title\'.'
        exp_model.commit(user_id, commit_message, [{
            'cmd': 'create_new',
            'title': 'title',
            'category': 'category',
        }])

        # Build the matching summary model by hand, since the regular save
        # path (which would have produced it) is being bypassed.
        exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
        exp_summary_model = exp_models.ExpSummaryModel(
            id=exp_id, title='title', category='category',
            objective='Old objective', language_code='en', tags=[],
            ratings=feconf.get_empty_ratings(),
            scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
            status=exp_rights.status,
            community_owned=exp_rights.community_owned,
            owner_ids=exp_rights.owner_ids, contributor_ids=[],
            contributors_summary={})
        exp_summary_model.put()

    def save_new_exp_with_states_schema_v21(self, exp_id, user_id, title):
        """Saves a new exploration with a default version 21 states dict,
        bypassing the regular save path.

        This should only be used by tests that exercise the migration of
        explorations stored under old states schema versions.

        Args:
            exp_id: str. The exploration id.
            user_id: str. The user_id of the creator.
            title: str. The title of the exploration.
        """
        exp_model = exp_models.ExplorationModel(
            id=exp_id, category='category', title=title,
            objective='Old objective', language_code='en', tags=[], blurb='',
            author_notes='', states_schema_version=21,
            init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
            states=self.VERSION_21_STATE_DICT, param_specs={}, param_changes=[])
        rights_manager.create_new_exploration_rights(exp_id, user_id)

        commit_message = 'New exploration created with title \'%s\'.' % title
        exp_model.commit(user_id, commit_message, [{
            'cmd': 'create_new',
            'title': 'title',
            'category': 'category',
        }])

        # Build the matching summary model by hand, since the regular save
        # path (which would have produced it) is being bypassed.
        exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
        exp_summary_model = exp_models.ExpSummaryModel(
            id=exp_id, title=title, category='category',
            objective='Old objective', language_code='en', tags=[],
            ratings=feconf.get_empty_ratings(),
            scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
            status=exp_rights.status,
            community_owned=exp_rights.community_owned,
            owner_ids=exp_rights.owner_ids, contributor_ids=[],
            contributors_summary={})
        exp_summary_model.put()
def publish_exploration(self, owner_id, exploration_id):
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_exploration(committer, exploration_id)
def save_new_default_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
collection_services.save_new_collection(owner_id, collection)
return collection
    def save_new_valid_collection(
            self, collection_id, owner_id, title='A title',
            category='A category', objective='An objective',
            language_code=constants.DEFAULT_LANGUAGE_CODE,
            exploration_id='an_exploration_id',
            end_state_name=DEFAULT_END_STATE_NAME):
        """Creates an Oppia collection containing one exploration node, and
        saves it.

        Args:
            collection_id: str. The id of the new collection.
            owner_id: str. The user_id of the creator of the collection.
            title: str. The title of the collection.
            category: str. The category this collection belongs to.
            objective: str. The objective of this collection.
            language_code: str. The language_code of this collection.
            exploration_id: str. The id of the exploration to include; it is
                created first if it does not already exist.
            end_state_name: str. The name of the end state of the included
                exploration.

        Returns:
            Collection. The collection domain object.
        """
        collection = collection_domain.Collection.create_default_collection(
            collection_id, title=title, category=category, objective=objective,
            language_code=language_code)

        # Check whether exploration with given exploration_id exists or not.
        exploration = (
            exp_fetchers.get_exploration_by_id(exploration_id, strict=False))
        if exploration is None:
            exploration = self.save_new_valid_exploration(
                exploration_id, owner_id, title=title, category=category,
                objective=objective, end_state_name=end_state_name)
        collection.add_node(exploration.id)

        collection_services.save_new_collection(owner_id, collection)
        return collection
def publish_collection(self, owner_id, collection_id):
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_collection(committer, collection_id)
def save_new_story(
self, story_id, owner_id, corresponding_topic_id,
title='Title', description='Description', notes='Notes',
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='title', meta_tag_content='story meta tag content'):
story = story_domain.Story.create_default_story(
story_id, title, description, corresponding_topic_id, url_fragment)
story.title = title
story.description = description
story.notes = notes
story.language_code = language_code
story.url_fragment = url_fragment
story.meta_tag_content = meta_tag_content
story_services.save_new_story(owner_id, story)
return story
    def save_new_story_with_story_contents_schema_v1(
            self, story_id, thumbnail_filename, thumbnail_bg_color,
            owner_id, title, description, notes, corresponding_topic_id,
            language_code=constants.DEFAULT_LANGUAGE_CODE,
            url_fragment='story-frag',
            meta_tag_content='story meta tag content'):
        """Saves a new story with a default version 1 story contents dict,
        committing directly to the model layer.

        This should only be used by tests that exercise the migration of
        stories stored under old story contents schema versions.

        Args:
            story_id: str. The id of the new story.
            thumbnail_filename: str|None. Thumbnail filename for the story.
            thumbnail_bg_color: str|None. Thumbnail background color.
            owner_id: str. The user_id of the creator of the story.
            title: str. The title of the story.
            description: str. The high level description of the story.
            notes: str. Notes describing the characters, storyline and setting.
            corresponding_topic_id: str. The id of the topic to which the story
                belongs.
            language_code: str. The ISO 639-1 code for the story's language.
            url_fragment: str. The url fragment of the story.
            meta_tag_content: str. The meta tag content of the story.
        """
        story_model = story_models.StoryModel(
            id=story_id, thumbnail_filename=thumbnail_filename,
            thumbnail_bg_color=thumbnail_bg_color, description=description,
            title=title, language_code=language_code,
            story_contents_schema_version=1, notes=notes,
            corresponding_topic_id=corresponding_topic_id,
            story_contents=self.VERSION_1_STORY_CONTENTS_DICT,
            url_fragment=url_fragment, meta_tag_content=meta_tag_content)
        commit_message = 'New story created with title \'%s\'.' % title
        story_model.commit(
            owner_id, commit_message,
            [{'cmd': story_domain.CMD_CREATE_NEW, 'title': title}])
    def save_new_subtopic(self, subtopic_id, owner_id, topic_id):
        """Creates and saves a default subtopic page for the given topic.

        Returns:
            SubtopicPage. The newly saved subtopic page domain object.
        """
        subtopic_page = (
            subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
                subtopic_id, topic_id))
        subtopic_changes = [
            subtopic_page_domain.SubtopicPageChange({
                'cmd': subtopic_page_domain.CMD_CREATE_NEW,
                'topic_id': topic_id,
                'subtopic_id': subtopic_id,
            })
        ]
        subtopic_page_services.save_subtopic_page(
            owner_id, subtopic_page, 'Create new subtopic', subtopic_changes)
        return subtopic_page
    def save_new_topic(
            self, topic_id, owner_id, name='topic', abbreviated_name='topic',
            url_fragment='topic',
            thumbnail_filename='topic.svg',
            thumbnail_bg_color=(
                constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]),
            description='description', canonical_story_ids=None,
            additional_story_ids=None, uncategorized_skill_ids=None,
            subtopics=None, next_subtopic_id=0,
            language_code=constants.DEFAULT_LANGUAGE_CODE,
            meta_tag_content='topic meta tag content',
            practice_tab_is_displayed=False,
            page_title_fragment_for_web='topic page title'):
        """Builds a Topic domain object from the given arguments and saves it.

        Story id lists are wrapped into StoryReference objects; None list
        arguments default to empty lists.

        Returns:
            Topic. The newly created and saved topic.
        """
        canonical_story_references = [
            topic_domain.StoryReference.create_default_story_reference(story_id)
            for story_id in (canonical_story_ids or [])
        ]
        additional_story_references = [
            topic_domain.StoryReference.create_default_story_reference(story_id)
            for story_id in (additional_story_ids or [])
        ]
        uncategorized_skill_ids = uncategorized_skill_ids or []
        subtopics = subtopics or []
        # NOTE: the Topic constructor is positional; the literal 0 below is
        # the topic's version field.
        topic = topic_domain.Topic(
            topic_id, name, abbreviated_name, url_fragment, thumbnail_filename,
            thumbnail_bg_color, description, canonical_story_references,
            additional_story_references, uncategorized_skill_ids, subtopics,
            feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, next_subtopic_id,
            language_code, 0, feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION,
            meta_tag_content, practice_tab_is_displayed,
            page_title_fragment_for_web)
        topic_services.save_new_topic(owner_id, topic)
        return topic
    def save_new_topic_with_subtopic_schema_v1(
            self, topic_id, owner_id, name, abbreviated_name, url_fragment,
            canonical_name, description, thumbnail_filename, thumbnail_bg_color,
            canonical_story_references, additional_story_references,
            uncategorized_skill_ids, next_subtopic_id,
            language_code=constants.DEFAULT_LANGUAGE_CODE,
            meta_tag_content='topic meta tag content',
            practice_tab_is_displayed=False,
            page_title_fragment_for_web='topic page title'):
        """Saves a TopicModel pinned at subtopic schema version 1, together
        with a published TopicRightsModel.

        Writes the storage models directly (bypassing the domain layer), so
        no schema migration runs on save — presumably used as fixtures for
        migration-job tests; verify against callers.
        """
        topic_rights_model = topic_models.TopicRightsModel(
            id=topic_id, manager_ids=[], topic_is_published=True)
        topic_model = topic_models.TopicModel(
            id=topic_id, name=name, abbreviated_name=abbreviated_name,
            url_fragment=url_fragment, thumbnail_filename=thumbnail_filename,
            thumbnail_bg_color=thumbnail_bg_color,
            canonical_name=canonical_name, description=description,
            language_code=language_code,
            canonical_story_references=canonical_story_references,
            additional_story_references=additional_story_references,
            uncategorized_skill_ids=uncategorized_skill_ids,
            subtopic_schema_version=1,
            story_reference_schema_version=(
                feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
            next_subtopic_id=next_subtopic_id,
            subtopics=[self.VERSION_1_SUBTOPIC_DICT],
            meta_tag_content=meta_tag_content,
            practice_tab_is_displayed=practice_tab_is_displayed,
            page_title_fragment_for_web=page_title_fragment_for_web)
        commit_message = 'New topic created with name \'%s\'.' % name
        topic_rights_model.commit(
            committer_id=owner_id,
            commit_message='Created new topic rights',
            commit_cmds=[{'cmd': topic_domain.CMD_CREATE_NEW}])
        topic_model.commit(
            owner_id, commit_message,
            [{'cmd': topic_domain.CMD_CREATE_NEW, 'name': name}])
def save_new_question(
self, question_id, owner_id, question_state_data,
linked_skill_ids, inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
question = question_domain.Question(
question_id, question_state_data,
feconf.CURRENT_STATE_SCHEMA_VERSION, language_code, 0,
linked_skill_ids, inapplicable_skill_misconception_ids or [])
question_services.add_question(owner_id, question)
return question
    def save_new_question_with_state_data_schema_v27(
            self, question_id, owner_id, linked_skill_ids,
            inapplicable_skill_misconception_ids=None,
            language_code=constants.DEFAULT_LANGUAGE_CODE):
        """Saves a QuestionModel pinned at state-data schema version 27.

        Writes the storage model directly (bypassing the domain layer), so
        no schema migration runs on save — presumably used as a fixture for
        migration-job tests; verify against callers.
        """
        question_model = question_models.QuestionModel(
            id=question_id, question_state_data=self.VERSION_27_STATE_DICT,
            language_code=language_code, version=1,
            question_state_data_schema_version=27,
            linked_skill_ids=linked_skill_ids,
            inapplicable_skill_misconception_ids=(
                inapplicable_skill_misconception_ids or []))
        question_model.commit(
            owner_id, 'New question created',
            [{'cmd': question_domain.CMD_CREATE_NEW}])
    def save_new_question_suggestion_with_state_data_schema_v27(
            self, author_id, skill_id, suggestion_id=None,
            language_code=constants.DEFAULT_LANGUAGE_CODE):
        """Saves an add-question suggestion whose payload uses state-data
        schema version 27.

        If suggestion_id is not supplied, a new feedback-thread id is
        generated for the skill.

        Returns:
            str. The id of the saved suggestion.
        """
        score_category = (
            suggestion_models.SCORE_TYPE_QUESTION +
            suggestion_models.SCORE_CATEGORY_DELIMITER + skill_id)
        change = {
            'cmd': (
                question_domain
                .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
            'question_dict': {
                'question_state_data': self.VERSION_27_STATE_DICT,
                'question_state_data_schema_version': 27,
                'language_code': language_code,
                'linked_skill_ids': [skill_id],
                'inapplicable_skill_misconception_ids': []
            },
            'skill_id': skill_id,
            'skill_difficulty': 0.3
        }
        if suggestion_id is None:
            suggestion_id = (
                feedback_models.GeneralFeedbackThreadModel.
                generate_new_thread_id(
                    feconf.ENTITY_TYPE_SKILL, skill_id))
        suggestion_models.GeneralSuggestionModel.create(
            feconf.SUGGESTION_TYPE_ADD_QUESTION,
            feconf.ENTITY_TYPE_SKILL, skill_id, 1,
            suggestion_models.STATUS_IN_REVIEW, author_id, None, change,
            score_category, suggestion_id, language_code)
        return suggestion_id
def save_new_skill(
self, skill_id, owner_id, description='description',
misconceptions=None, rubrics=None, skill_contents=None,
language_code=constants.DEFAULT_LANGUAGE_CODE,
prerequisite_skill_ids=None):
skill = (
skill_domain.Skill.create_default_skill(skill_id, description, []))
if misconceptions is not None:
skill.misconceptions = misconceptions
skill.next_misconception_id = len(misconceptions) + 1
if skill_contents is not None:
skill.skill_contents = skill_contents
if prerequisite_skill_ids is not None:
skill.prerequisite_skill_ids = prerequisite_skill_ids
if rubrics is not None:
skill.rubrics = rubrics
else:
skill.rubrics = [
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[2], ['Explanation 3']),
]
skill.language_code = language_code
skill.version = 0
skill_services.save_new_skill(owner_id, skill)
return skill
    def save_new_skill_with_defined_schema_versions(
            self, skill_id, owner_id, description, next_misconception_id,
            misconceptions=None, rubrics=None, skill_contents=None,
            misconceptions_schema_version=1, rubric_schema_version=1,
            skill_contents_schema_version=1,
            language_code=constants.DEFAULT_LANGUAGE_CODE):
        """Saves a SkillModel with explicitly chosen schema versions.

        Writes the storage model directly (bypassing the domain layer), so
        no schema migration runs on save — presumably used as a fixture for
        migration-job tests; verify against callers.
        """
        skill_model = skill_models.SkillModel(
            id=skill_id, description=description, language_code=language_code,
            misconceptions=misconceptions, rubrics=rubrics,
            skill_contents=skill_contents,
            next_misconception_id=next_misconception_id,
            misconceptions_schema_version=misconceptions_schema_version,
            rubric_schema_version=rubric_schema_version,
            skill_contents_schema_version=skill_contents_schema_version,
            superseding_skill_id=None, all_questions_merged=False)
        skill_model.commit(
            owner_id, 'New skill created.',
            [{'cmd': skill_domain.CMD_CREATE_NEW}])
    def _create_valid_question_data(self, default_dest_state_name):
        """Creates a valid TextInput question state for use in tests.

        The state gets a hint, a solution, customization args and a default
        outcome marked as correct (with no destination, as questions have no
        next state).

        Args:
            default_dest_state_name: str. Name for the newly created state.

        Returns:
            State. A fully populated state domain object.
        """
        state = state_domain.State.create_default_state(
            default_dest_state_name, is_initial_state=True)
        state.update_interaction_id('TextInput')
        solution_dict = {
            'answer_is_exclusive': False,
            'correct_answer': 'Solution',
            'explanation': {
                'content_id': 'solution',
                'html': '<p>This is a solution.</p>',
            },
        }
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')),
        ]
        solution = state_domain.Solution.from_dict(
            state.interaction.id, solution_dict)
        state.update_interaction_solution(solution)
        state.update_interaction_hints(hints_list)
        state.update_interaction_customization_args({
            'placeholder': {
                'value': {
                    'content_id': 'ca_placeholder',
                    'unicode_str': 'Enter text here',
                },
            },
            'rows': {'value': 1},
        })
        # Two content ids ('solution', 'ca_placeholder', 'hint_1' share the
        # index space) have been consumed so far — TODO confirm the expected
        # value against State.update_next_content_id_index usage elsewhere.
        state.update_next_content_id_index(2)
        state.interaction.default_outcome.labelled_as_correct = True
        state.interaction.default_outcome.dest = None
        return state
class LinterTestBase(GenericTestBase):
    """Base class for tests of the linter scripts; captures linter output."""

    def setUp(self):
        super(LinterTestBase, self).setUp()
        # Each call to the (swapped) print function appends one line here.
        self.linter_stdout = []
        def mock_print(*args):
            # Mimic print: join all positional args with single spaces.
            self.linter_stdout.append(
                ' '.join(python_utils.UNICODE(arg) for arg in args))
        self.print_swap = self.swap(python_utils, 'PRINT', mock_print)

    def assert_same_list_elements(self, phrases, stdout):
        """Asserts that some line of stdout contains every given phrase."""
        self.assertTrue(
            any(all(p in output for p in phrases) for output in stdout))

    def assert_failed_messages_count(self, stdout, expected_failed_count):
        """Asserts how many captured lines start with 'FAILED'."""
        failed_count = sum(msg.startswith('FAILED') for msg in stdout)
        self.assertEqual(failed_count, expected_failed_count)
class AuditJobsTestBase(GenericTestBase):
    """Base class for audit-job tests; runs self.job_class end to end."""

    def run_job_and_check_output(
            self, expected_output, sort=False, literal_eval=False):
        """Enqueues the one-off job, drains the task queues, and compares
        the job's output to expected_output.

        Args:
            expected_output: list(str). The expected output lines.
            sort: bool. Compare as sorted lists (order-insensitive).
            literal_eval: bool. Parse each line with ast.literal_eval as a
                (key, value) pair and compare per key, sorting list values.
        """
        self.process_and_flush_pending_tasks()
        job_id = self.job_class.create_new()
        # Sanity-check: the one-off queue must start empty, hold exactly one
        # job after enqueueing, then be drained before reading output.
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
        self.job_class.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        self.process_and_flush_pending_tasks()
        actual_output = self.job_class.get_output(job_id)
        if literal_eval:
            actual_output_dict = {}
            expected_output_dict = {}
            for item in (ast.literal_eval(value) for value in actual_output):
                value = item[1]
                if isinstance(value, list):
                    value = sorted(value)
                actual_output_dict[item[0]] = value
            for item in (ast.literal_eval(value) for value in expected_output):
                value = item[1]
                if isinstance(value, list):
                    value = sorted(value)
                expected_output_dict[item[0]] = value
            # Keys must match, then each value must match exactly.
            self.assertItemsEqual(actual_output_dict, expected_output_dict)
            for key in actual_output_dict:
                self.assertEqual(
                    actual_output_dict[key], expected_output_dict[key])
        elif sort:
            self.assertEqual(sorted(actual_output), sorted(expected_output))
        else:
            self.assertEqual(actual_output, expected_output)
class EmailMessageMock(python_utils.OBJECT):
    """Plain record of one outgoing email captured during tests."""

    def __init__(
            self, sender_email, recipient_email, subject, plaintext_body,
            html_body, bcc=None, reply_to=None, recipient_variables=None):
        """Stores the email fields under the attribute names used by the
        real mail API (sender/to/subject/body/html/...).
        """
        self.sender = sender_email
        self.to = recipient_email
        self.subject = subject
        self.body = plaintext_body
        self.html = html_body
        self.bcc = bcc
        self.reply_to = reply_to
        self.recipient_variables = recipient_variables
class GenericEmailTestBase(GenericTestBase):
    """Base class that captures outgoing emails instead of sending them."""

    # Class-level default; replaced with a fresh defaultdict per test in
    # setUp via _wipe_emails_dict. Maps recipient email -> list of messages.
    emails_dict = collections.defaultdict(list)

    def run(self, result=None):
        """Runs the test with email sending swapped for local capture."""
        with self.swap(
            email_services, 'send_email_to_recipients',
            self._send_email_to_recipients):
            # NOTE(review): this names the module-level EmailTestBase alias
            # (defined after this class) rather than GenericEmailTestBase;
            # the two are the same class, so super() resolves identically.
            super(EmailTestBase, self).run(result=result)

    def setUp(self):
        super(GenericEmailTestBase, self).setUp()
        self._wipe_emails_dict()

    def _wipe_emails_dict(self):
        """Resets the captured-email store to an empty mapping."""
        self.emails_dict = collections.defaultdict(list)

    def _send_email_to_recipients(
            self, sender_email, recipient_emails, subject, plaintext_body,
            html_body, bcc=None, reply_to=None, recipient_variables=None):
        """Mock for email_services.send_email_to_recipients: records the
        message once per recipient and reports success.

        Returns:
            bool. Always True, mirroring a successful send.
        """
        bcc_emails = None
        if bcc:
            # A single bcc address is stored bare rather than as a list.
            bcc_emails = bcc[0] if len(bcc) == 1 else bcc
        new_email = EmailMessageMock(
            sender_email, recipient_emails, subject, plaintext_body, html_body,
            bcc=bcc_emails, reply_to=(reply_to if reply_to else None),
            recipient_variables=(
                recipient_variables if recipient_variables else None))
        for recipient_email in recipient_emails:
            self.emails_dict[recipient_email].append(new_email)
        return True

    def _get_sent_email_messages(self, to):
        """Returns the messages captured for address `to` (possibly [])."""
        return self.emails_dict[to] if to in self.emails_dict else []

    def _get_all_sent_email_messages(self):
        """Returns the full recipient -> list-of-messages mapping."""
        return self.emails_dict
# Backwards-compatible alias. GenericEmailTestBase.run calls
# super(EmailTestBase, self), so this module-level name must stay defined.
EmailTestBase = GenericEmailTestBase
class ClassifierTestBase(GenericEmailTestBase):
    """Base class for classifier tests: binary POSTs and training-job data."""

    def post_blob(self, url, payload, expected_status_int=200):
        """POSTs raw bytes to url with content-type application/octet-stream.

        Args:
            url: str. The target URL.
            payload: bytes. The raw request body.
            expected_status_int: int. Expected HTTP status; >= 400 means the
                request is expected to fail.

        Returns:
            dict. The parsed JSON response.
        """
        data = payload
        expect_errors = False
        if expected_status_int >= 400:
            expect_errors = True
        response = self._send_post_request(
            self.testapp, url, data,
            expect_errors, expected_status_int=expected_status_int,
            headers={b'content-type': b'application/octet-stream'})
        # _send_post_request only asserts >= 400 for error cases, so pin the
        # exact status code here.
        self.assertEqual(response.status_int, expected_status_int)
        return self._parse_json_response(response, expect_errors)

    def _get_classifier_data_from_classifier_training_job(
            self, classifier_training_job):
        """Loads and decodes the stored classifier model for a training job.

        Returns:
            TextClassifierFrozenModel. The parsed classifier-data proto.
        """
        filename = classifier_training_job.classifier_data_filename
        file_system_class = fs_services.get_entity_file_system_class()
        fs = fs_domain.AbstractFileSystem(file_system_class(
            feconf.ENTITY_TYPE_EXPLORATION, classifier_training_job.exp_id))
        # Stored data is zlib-compressed serialized proto bytes.
        classifier_data = utils.decompress_from_zlib(fs.get(filename))
        classifier_data_proto = text_classifier_pb2.TextClassifierFrozenModel()
        classifier_data_proto.ParseFromString(classifier_data)
        return classifier_data_proto
class FunctionWrapper(python_utils.OBJECT):
    """Wraps a callable, invoking pre/post hooks around every call.

    Subclasses override pre_call_hook/post_call_hook. Works for plain
    functions and instance methods (via the descriptor protocol below);
    not designed for classmethods/staticmethods.
    """

    def __init__(self, func):
        """Stores the callable to wrap; _instance is filled in lazily by
        __get__ when the wrapper is accessed as an attribute.
        """
        self._func = func
        self._instance = None

    def __call__(self, *args, **kwargs):
        """Runs pre_call_hook, the wrapped callable, then post_call_hook.

        Returns:
            *. Whatever the wrapped callable returns.
        """
        if self._instance is not None:
            # Re-insert the bound instance as the first positional argument.
            args = [self._instance] + list(args)
        # Map all arguments to parameter names so hooks get a single dict.
        args_dict = inspect.getcallargs(self._func, *args, **kwargs)
        self.pre_call_hook(args_dict)
        result = self._func(*args, **kwargs)
        self.post_call_hook(args_dict, result)
        return result

    def __get__(self, instance, owner):
        # Descriptor protocol hook: remember the instance so __call__ is able
        # to bind to the instance self._func was bound to. See the following SO
        # answer: https://stackoverflow.com/a/22555978/675311
        self._instance = instance
        return self

    def pre_call_hook(self, args):
        """Called with the args dict before each invocation; no-op here."""
        pass

    def post_call_hook(self, args, result):
        """Called with the args dict and result after each invocation."""
        pass
class CallCounter(FunctionWrapper):
    """FunctionWrapper that counts how many times the wrapped callable runs."""

    def __init__(self, f):
        super(CallCounter, self).__init__(f)
        # Incremented in pre_call_hook, i.e. before the wrapped call runs.
        self._times_called = 0

    @property
    def times_called(self):
        """int. Number of invocations so far (including failed ones)."""
        return self._times_called

    def pre_call_hook(self, args):
        """Counts the invocation; `args` is unused."""
        self._times_called += 1
class FailingFunction(FunctionWrapper):
    """FunctionWrapper that raises a given exception for the first
    num_tries_before_success invocations, then behaves normally.

    Pass FailingFunction.INFINITY to make every invocation fail.
    """

    # Sentinel meaning "fail on every call". Note this is a string, so it
    # must never be compared with integers (see the ordering of the boolean
    # checks below).
    INFINITY = 'infinity'

    def __init__(self, f, exception, num_tries_before_success):
        """Args:
            f: callable. The function to wrap.
            exception: Exception. The exception instance to raise.
            num_tries_before_success: int|str. Number of failing calls
                before the first success, or FailingFunction.INFINITY.

        Raises:
            ValueError. num_tries_before_success is a negative integer (or
                otherwise invalid).
        """
        super(FailingFunction, self).__init__(f)
        self._exception = exception
        self._num_tries_before_success = num_tries_before_success
        self._always_fail = (
            self._num_tries_before_success == FailingFunction.INFINITY)
        self._times_called = 0
        # Check _always_fail FIRST: comparing the INFINITY sentinel (a str)
        # against an int with >= raises TypeError on Python 3, so the
        # short-circuit must skip the numeric comparison in that case.
        if not (self._always_fail or self._num_tries_before_success >= 0):
            raise ValueError(
                'num_tries_before_success should either be an '
                'integer greater than or equal to 0, '
                'or FailingFunction.INFINITY')

    def pre_call_hook(self, args):
        """Raises the configured exception until enough calls have occurred.

        Args:
            args: dict. Argument mapping of the wrapped call (unused).

        Raises:
            Exception. The configured exception, while the call count is at
                most num_tries_before_success (or always, for INFINITY).
        """
        self._times_called += 1
        # As in __init__, test _always_fail first so the INFINITY string is
        # never compared numerically (TypeError on Python 3).
        call_should_fail = (
            self._always_fail or
            self._num_tries_before_success >= self._times_called)
        if call_should_fail:
            raise self._exception
| true
| true
|
f704460cf7ea30ec843e7420c89fc0493ee56776
| 191
|
py
|
Python
|
velbus/modules/__init__.py
|
gitd8400/python-velbus
|
ca5bcbb347b82f2e41b599e7544f560b5f355251
|
[
"MIT"
] | null | null | null |
velbus/modules/__init__.py
|
gitd8400/python-velbus
|
ca5bcbb347b82f2e41b599e7544f560b5f355251
|
[
"MIT"
] | null | null | null |
velbus/modules/__init__.py
|
gitd8400/python-velbus
|
ca5bcbb347b82f2e41b599e7544f560b5f355251
|
[
"MIT"
] | null | null | null |
"""
:author: Thomas Delaet <thomas@delaet.org>
"""
from velbus.modules.vmb4ry import VMB4RYModule
from velbus.modules.vmbin import VMB6INModule
from velbus.modules.vmbin import VMB7INModule
| 23.875
| 46
| 0.806283
|
from velbus.modules.vmb4ry import VMB4RYModule
from velbus.modules.vmbin import VMB6INModule
from velbus.modules.vmbin import VMB7INModule
| true
| true
|
f70446cde10071c4761a7dc95d296e9fa2db3519
| 1,627
|
py
|
Python
|
hijack/tests/test_admin.py
|
sondrelg/django-hijack
|
de8d72fa53cf0abf1ec63105dd7b58ff923528fb
|
[
"MIT"
] | null | null | null |
hijack/tests/test_admin.py
|
sondrelg/django-hijack
|
de8d72fa53cf0abf1ec63105dd7b58ff923528fb
|
[
"MIT"
] | null | null | null |
hijack/tests/test_admin.py
|
sondrelg/django-hijack
|
de8d72fa53cf0abf1ec63105dd7b58ff923528fb
|
[
"MIT"
] | null | null | null |
from unittest.mock import MagicMock
from django.urls import reverse
from hijack.contrib.admin import HijackUserAdminMixin
from hijack.tests.test_app.models import Post
class TestHijackUserAdminMixin:
    """Tests for HijackUserAdminMixin (django-hijack admin integration)."""

    def test_user_admin(self, admin_client):
        """The user changelist renders a HIJACK submit button per row."""
        url = reverse("admin:test_app_customuser_changelist")
        response = admin_client.get(url)
        assert response.status_code == 200
        assert (
            b'<button type="submit" class="button">HIJACK</button>' in response.content
        )

    def test_related_user(self, admin_client, admin_user):
        """A changelist whose rows reference a user shows the hijack field."""
        url = reverse("admin:test_app_post_changelist")
        Post.objects.create(author=admin_user)
        response = admin_client.get(url)
        assert response.status_code == 200
        assert b"Hijack admin" in response.content

    def test_get_hijack_success_url__obj_absolute_url(self, rf):
        """With no explicit success URL, the object's absolute URL is used."""
        obj = Post()
        obj.get_absolute_url = MagicMock(return_value="/path/to/obj/")
        admin = HijackUserAdminMixin()
        assert admin.get_hijack_success_url(None, obj) == "/path/to/obj/"

    def test_get_hijack_success_url__obj_no_absolute_url(self, rf):
        """Without an absolute URL, the default login-redirect path is used."""
        obj = Post()
        admin = HijackUserAdminMixin()
        assert admin.get_hijack_success_url(None, obj) == "/accounts/profile/"

    def test_get_hijack_success_url__hijack_success_url(self, rf):
        """An explicit hijack_success_url overrides the object's URL."""
        obj = Post()
        obj.get_absolute_url = MagicMock(return_value="/path/to/obj/")
        admin = HijackUserAdminMixin()
        admin.hijack_success_url = "/custom/success/path/"
        assert admin.get_hijack_success_url(None, obj) == "/custom/success/path/"
| 38.738095
| 87
| 0.700061
|
from unittest.mock import MagicMock
from django.urls import reverse
from hijack.contrib.admin import HijackUserAdminMixin
from hijack.tests.test_app.models import Post
class TestHijackUserAdminMixin:
def test_user_admin(self, admin_client):
url = reverse("admin:test_app_customuser_changelist")
response = admin_client.get(url)
assert response.status_code == 200
assert (
b'<button type="submit" class="button">HIJACK</button>' in response.content
)
def test_related_user(self, admin_client, admin_user):
url = reverse("admin:test_app_post_changelist")
Post.objects.create(author=admin_user)
response = admin_client.get(url)
assert response.status_code == 200
assert b"Hijack admin" in response.content
def test_get_hijack_success_url__obj_absolute_url(self, rf):
obj = Post()
obj.get_absolute_url = MagicMock(return_value="/path/to/obj/")
admin = HijackUserAdminMixin()
assert admin.get_hijack_success_url(None, obj) == "/path/to/obj/"
def test_get_hijack_success_url__obj_no_absolute_url(self, rf):
obj = Post()
admin = HijackUserAdminMixin()
assert admin.get_hijack_success_url(None, obj) == "/accounts/profile/"
def test_get_hijack_success_url__hijack_success_url(self, rf):
obj = Post()
obj.get_absolute_url = MagicMock(return_value="/path/to/obj/")
admin = HijackUserAdminMixin()
admin.hijack_success_url = "/custom/success/path/"
assert admin.get_hijack_success_url(None, obj) == "/custom/success/path/"
| true
| true
|
f70447682772f8dce75902fd8f48d39c34673f82
| 3,109
|
py
|
Python
|
modelpractice/modelpractice/settings.py
|
prernaniraj/python_django_rest_api
|
b69f5dc015c3d84c81bac4fc345d585513d3dda9
|
[
"MIT"
] | null | null | null |
modelpractice/modelpractice/settings.py
|
prernaniraj/python_django_rest_api
|
b69f5dc015c3d84c81bac4fc345d585513d3dda9
|
[
"MIT"
] | null | null | null |
modelpractice/modelpractice/settings.py
|
prernaniraj/python_django_rest_api
|
b69f5dc015c3d84c81bac4fc345d585513d3dda9
|
[
"MIT"
] | null | null | null |
"""
Django settings for modelpractice project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ligk%x$+)qey=q+&d_nca7%s-_@zn4%g=kg_4+p!ga7n)-4nb@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'modelpractice.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'modelpractice.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.694215
| 91
| 0.696365
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'ligk%x$+)qey=q+&d_nca7%s-_@zn4%g=kg_4+p!ga7n)-4nb@'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'modelpractice.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'modelpractice.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| true
| true
|
f704478779f04dbcbfcffa9eff8c9f6df4e7a789
| 7,764
|
py
|
Python
|
g_packages/deepImpute/docker/deepimpute/deepimpute/multinet.py
|
lanagarmire/granatumx
|
3dee3a8fb2ba851c31a9f6338aef1817217769f9
|
[
"MIT"
] | 1
|
2021-03-04T13:04:28.000Z
|
2021-03-04T13:04:28.000Z
|
g_packages/deepImpute/docker/deepimpute/deepimpute/multinet.py
|
lanagarmire/granatumx
|
3dee3a8fb2ba851c31a9f6338aef1817217769f9
|
[
"MIT"
] | 16
|
2020-01-28T23:03:40.000Z
|
2022-02-10T00:30:16.000Z
|
g_packages/deepImpute/docker/deepimpute/deepimpute/multinet.py
|
lanagarmire/granatumx
|
3dee3a8fb2ba851c31a9f6338aef1817217769f9
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import binascii
import warnings
import tempfile
from math import ceil
from multiprocessing import cpu_count, sharedctypes
from multiprocessing.pool import Pool
from sklearn.metrics import r2_score
from deepimpute.net import Net
from deepimpute.normalizer import Normalizer
from deepimpute.util import get_input_genes,get_target_genes
from deepimpute.util import score_model
def newCoreInitializer(arr_to_populate):
    """Pool-worker initializer: stores the shared ctypes array in a
    module-level global so workers can read it without re-pickling the data.
    """
    global sharedArray
    sharedArray = arr_to_populate
def trainNet(in_out, NN_param_i, data_i, labels):
    """Trains one sub-network on the shared data matrix.

    Args:
        in_out: tuple. (predictor genes, target genes) for this sub-network.
        NN_param_i: dict. Keyword arguments for the Net constructor.
        data_i: np.ndarray. Flattened training matrix from shared memory.
        labels: tuple. (row index, column labels) of the training frame.

    Returns:
        dict. Picklable attributes of the trained Net, suitable for
        reconstructing it in the parent process via Net(**result).
    """
    features, targets = in_out
    net = Net(**NN_param_i)
    net.fit(data_i, targetGenes=targets, predictorGenes=features, labels=labels)
    # Collect the trained net's attributes to send back to the parent.
    params = list(NN_param_i.keys()) + ['targetGenes', 'NNid', 'predictorGenes']
    args2return = [(attr, getattr(net, attr)) for attr in params]
    # NOTE(review): for underscore-prefixed attrs the VALUE becomes the
    # (stripped-name, value) tuple while the key keeps its underscore —
    # looks like the key, not the value, was meant to be rewritten; confirm
    # against Net's constructor before changing.
    return {k: v if k[0] != '_' else (k[1:], v) for k, v in args2return}
def predictNet(data_i, NN_param_i, labels):
    """Runs one sub-network's prediction on the shared data matrix.

    Reshapes the flat shared array back into a labeled DataFrame using the
    (index, columns) pair in `labels`, then delegates to Net.predict.
    """
    net = Net(**NN_param_i)
    data_i_ok = pd.DataFrame(np.reshape(data_i, list(map(len, labels))),
                             index=labels[0], columns=labels[1])
    return net.predict(data_i_ok)
def trainOrPredict(args):
    """Pool worker entry point: dispatches to trainNet or predictNet.

    Reads the training/prediction matrix from the process-global shared
    array installed by newCoreInitializer.

    Args:
        args: tuple. (in_out genes, Net params, (index, columns), mode)
            where mode is 'train' or 'predict'.
    """
    in_out, NN_param_i, labels, mode = args
    with warnings.catch_warnings():
        # np.ctypeslib.as_array on a raw shared array warns; silence it.
        warnings.simplefilter('ignore', RuntimeWarning)
        data_i = np.ctypeslib.as_array(sharedArray)
    if mode == "predict":
        return predictNet(data_i, NN_param_i, labels)
    return trainNet(in_out, NN_param_i, data_i, labels)
class MultiNet(object):
    """Ensemble of sub-networks that jointly impute a gene-expression matrix.

    Genes are partitioned into groups of at most dims[1] targets; one Net is
    trained per group on shared-memory data across worker processes.
    """

    def __init__(self, n_cores=4, predictorLimit=10, preproc='log_or_exp', runDir=os.path.join(tempfile.gettempdir(),'run'), seed=0, **NN_params):
        """Args:
            n_cores: int|'all'. Max worker threads ('all' = every CPU).
            predictorLimit: number. Min 99th-percentile expression for a gene
                to be used as a predictor.
            preproc: str. Name of the Normalizer to apply before training.
            runDir: str. Base directory for per-run artifacts.
            seed: int. Seed for numpy RNG and the sub-networks.
            **NN_params: Extra keyword arguments forwarded to each Net.
        """
        self._maxcores = n_cores
        self.predictorLimit = predictorLimit
        self.norm = Normalizer.fromName(preproc)
        self.runDir = runDir
        self.seed = seed
        self.NN_params = NN_params
        self.NN_params['seed'] = seed
        if 'dims' not in self.NN_params.keys():
            # Default architecture: 20 predictors in, 500 targets out.
            self.NN_params['dims'] = [20,500]

    @property
    def maxcores(self):
        """int. Effective worker-thread cap ('all' resolves to cpu_count)."""
        if self._maxcores == 'all':
            return cpu_count()
        else:
            return self._maxcores

    @maxcores.setter
    def maxcores(self, value):
        self._maxcores = value

    def get_params(self, deep=False):
        """Returns the instance attribute dict (sklearn-style accessor)."""
        return self.__dict__

    def setIDandRundir(self,data):
        """Generates a random run id and derives this run's directory."""
        runID = binascii.b2a_hex(os.urandom(5))
        if type(runID) is bytes:
            # Python 3: b2a_hex returns bytes; store a str.
            runID = runID.decode()
        self.NN_params['runDir'] = os.path.join(self.runDir, str(runID))

    def getCores(self,NN_genes):
        """Splits the core budget between parallel runs and per-run cores.

        Returns:
            (int, int). Number of sub-network runs and worker processes.
        """
        n_runs = int(ceil(1.*len(NN_genes) / self.NN_params['dims'][1]))
        n_cores = min(self.maxcores, n_runs)
        self.NN_params['n_cores'] = max(1, int(self.maxcores / n_cores))
        return n_runs,n_cores

    def fit(self, data, NN_lim='auto', cell_subset=None):
        """Trains one sub-network per target-gene group and stores them.

        Args:
            data: array-like. Cells x genes expression matrix.
            NN_lim: 'auto'|int. Cap on the number of genes to impute.
            cell_subset: None|float|int. Fraction (float) or count (int) of
                cells to train on; None uses all cells.

        Returns:
            MultiNet. self, for chaining.
        """
        np.random.seed(seed=self.seed)
        df = pd.DataFrame(data)
        self.setIDandRundir(df)
        # Change the output dimension if the data has too few genes.
        if df.shape[1] < self.NN_params['dims'][1]:
            self.NN_params['dims'][1] = df.shape[1]
        # Choose genes to impute, ranked by 99th-percentile expression.
        genes_sort = df.quantile(.99).sort_values(ascending=False)
        NN_genes = get_target_genes(genes_sort,NN_lim=NN_lim)
        df_to_impute = df[NN_genes]
        n_runs,n_cores = self.getCores(NN_genes)
        # ------------------------# Subnetworks #------------------------#
        predictors = np.intersect1d(genes_sort.index[genes_sort>self.predictorLimit], NN_genes)
        print('Using {} genes as potential predictors'.format(len(predictors)))
        n_choose = int(len(NN_genes)/self.NN_params['dims'][1])
        subGenelists = np.random.choice(NN_genes,
                                        [n_choose, self.NN_params['dims'][1]],
                                        replace=False).tolist()
        if n_choose < n_runs:
            # Special case: for the last run, the output layer will have
            # fewer nodes (the leftover genes not yet assigned to a group).
            selectedGenes = np.reshape(subGenelists, -1)
            subGenelists.append(np.setdiff1d(NN_genes, selectedGenes).tolist())
        # ------------------# Extracting input genes #-------------------#
        # Distance = 1 - |correlation|, restricted to candidate predictors.
        corrMatrix = 1 - np.abs(pd.DataFrame(np.corrcoef(df_to_impute.T),
                                             index=NN_genes, columns=NN_genes)[predictors])
        in_out_genes = get_input_genes(df_to_impute,self.NN_params['dims'],distanceMatrix=corrMatrix,
                                       targets=subGenelists,predictorLimit=self.predictorLimit)
        # ------------------# Subsets for fitting #----------------------#
        n_cells = df_to_impute.shape[0]
        if type(cell_subset) is float or cell_subset == 1:
            n_cells = int(cell_subset * n_cells)
        elif type(cell_subset) is int:
            n_cells = cell_subset
        self.trainCells = df_to_impute.sample(n_cells,replace=False).index
        print('Starting training with {} cells ({:.1%}) on {} threads ({} cores/thread).'.
              format(n_cells, 1.*n_cells/df_to_impute.shape[0], n_cores, self.NN_params['n_cores']))
        # ------------------# Preprocessing (if any) #-------------------#
        df_to_impute = self.norm.fit(df_to_impute).transform(df_to_impute)
        # ---------# Share matrix between subprocesses #-----------------#
        # Flatten the training matrix into shared memory; workers rebuild it
        # from these labels.
        idx, cols = self.trainCells, df_to_impute.columns
        trainData = df_to_impute.loc[self.trainCells, :].values
        childJobs = [(in_out, self.NN_params, (idx, cols), 'train')
                     for in_out in in_out_genes]
        output_dicts = self.runOnMultipleCores(n_cores, trainData.flatten(), childJobs)
        # Rebuild each trained sub-network from its returned attribute dict.
        self.networks = []
        for dictionnary in output_dicts:
            self.networks.append(Net(**dictionnary))
        return self

    def runOnMultipleCores(self, cores, data, childJobs):
        """Runs trainOrPredict over childJobs in a process pool sharing
        `data` via a raw ctypes array (no per-worker copy/pickle).

        Returns:
            list. One result per job, in job order.
        """
        sharedArray = sharedctypes.RawArray('d', data)
        pool = Pool(processes=cores, initializer=newCoreInitializer, initargs=(sharedArray,))
        output_dicts = pool.map(trainOrPredict, childJobs)
        pool.close()
        pool.join()
        return output_dicts

    def predict(self, data, imputed_only=False, restore_pos_values=True):
        """Imputes `data` using the trained sub-networks.

        Args:
            data: array-like. Cells x genes matrix to impute.
            imputed_only: bool. Return only the imputed gene columns.
            restore_pos_values: bool. Keep original values where they are
                already positive (only fill in zeros/negatives).

        Returns:
            pd.DataFrame|np.ndarray. Matches the input container type.
        """
        df = pd.DataFrame(data)
        idx, cols = df.index, df.columns
        df_norm = self.norm.fit(df).transform(df).values.flatten()
        # The (12, 15) in_out placeholder is ignored in 'predict' mode; each
        # net already carries its predictor/target genes in net.__dict__.
        childJobs = [((12, 15), net.__dict__, (idx, cols), 'predict')
                     for net in self.networks]
        output_dicts = self.runOnMultipleCores(self.maxcores, df_norm, childJobs)
        Y_imputed = pd.concat(output_dicts, axis=1)
        Y_not_imputed = df[[gene for gene in df.columns if gene not in Y_imputed.columns]]
        # Reassemble in the original column order and undo the normalization.
        Y_total = self.norm.transform(pd.concat([Y_imputed, Y_not_imputed], axis=1)[df.columns],
                                      rev=True)
        if restore_pos_values:
            Y_total = Y_total.mask(df>0,df)
        if imputed_only:
            Y_total = Y_total[Y_imputed.columns]
        if type(data) == type(pd.DataFrame()):
            return Y_total
        else:
            return Y_total.values

    def score(self, data, metric=r2_score):
        """Scores the imputation on `data` over the imputed genes.

        Args:
            data: array-like. Cells x genes matrix.
            metric: callable. sklearn-style metric (default r2_score).

        Returns:
            float. The model score under `metric`.
        """
        imputedGenes = list(zip(*[ net.targetGenes for net in self.networks ]))
        # BUG FIX: previously hard-coded metric=r2_score, silently ignoring
        # the caller-supplied metric argument.
        return score_model(self,pd.DataFrame(data),metric=metric, cols=imputedGenes)
| 36.971429
| 146
| 0.618367
|
import os
import numpy as np
import pandas as pd
import binascii
import warnings
import tempfile
from math import ceil
from multiprocessing import cpu_count, sharedctypes
from multiprocessing.pool import Pool
from sklearn.metrics import r2_score
from deepimpute.net import Net
from deepimpute.normalizer import Normalizer
from deepimpute.util import get_input_genes,get_target_genes
from deepimpute.util import score_model
def newCoreInitializer(arr_to_populate):
global sharedArray
sharedArray = arr_to_populate
def trainNet(in_out, NN_param_i, data_i, labels):
features, targets = in_out
net = Net(**NN_param_i)
net.fit(data_i, targetGenes=targets, predictorGenes=features, labels=labels)
params = list(NN_param_i.keys()) + ['targetGenes', 'NNid', 'predictorGenes']
args2return = [(attr, getattr(net, attr)) for attr in params]
return {k: v if k[0] != '_' else (k[1:], v) for k, v in args2return}
def predictNet(data_i, NN_param_i, labels):
net = Net(**NN_param_i)
data_i_ok = pd.DataFrame(np.reshape(data_i, list(map(len, labels))),
index=labels[0], columns=labels[1])
return net.predict(data_i_ok)
def trainOrPredict(args):
in_out, NN_param_i, labels, mode = args
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
data_i = np.ctypeslib.as_array(sharedArray)
if mode == "predict":
return predictNet(data_i, NN_param_i, labels)
return trainNet(in_out, NN_param_i, data_i, labels)
class MultiNet(object):
def __init__(self, n_cores=4, predictorLimit=10, preproc='log_or_exp', runDir=os.path.join(tempfile.gettempdir(),'run'), seed=0, **NN_params):
self._maxcores = n_cores
self.predictorLimit = predictorLimit
self.norm = Normalizer.fromName(preproc)
self.runDir = runDir
self.seed = seed
self.NN_params = NN_params
self.seed = seed
self.NN_params['seed'] = seed
if 'dims' not in self.NN_params.keys():
self.NN_params['dims'] = [20,500]
@property
def maxcores(self):
if self._maxcores == 'all':
return cpu_count()
else:
return self._maxcores
@maxcores.setter
def maxcores(self, value):
self._maxcores = value
def get_params(self, deep=False):
return self.__dict__
def setIDandRundir(self,data):
runID = binascii.b2a_hex(os.urandom(5))
if type(runID) is bytes:
runID = runID.decode()
self.NN_params['runDir'] = os.path.join(self.runDir, str(runID))
def getCores(self,NN_genes):
n_runs = int(ceil(1.*len(NN_genes) / self.NN_params['dims'][1]))
n_cores = min(self.maxcores, n_runs)
self.NN_params['n_cores'] = max(1, int(self.maxcores / n_cores))
return n_runs,n_cores
def fit(self, data, NN_lim='auto', cell_subset=None):
np.random.seed(seed=self.seed)
df = pd.DataFrame(data)
self.setIDandRundir(df)
if df.shape[1] < self.NN_params['dims'][1]:
self.NN_params['dims'][1] = df.shape[1]
genes_sort = df.quantile(.99).sort_values(ascending=False)
NN_genes = get_target_genes(genes_sort,NN_lim=NN_lim)
df_to_impute = df[NN_genes]
n_runs,n_cores = self.getCores(NN_genes)
predictors = np.intersect1d(genes_sort.index[genes_sort>self.predictorLimit], NN_genes)
print('Using {} genes as potential predictors'.format(len(predictors)))
n_choose = int(len(NN_genes)/self.NN_params['dims'][1])
subGenelists = np.random.choice(NN_genes,
[n_choose, self.NN_params['dims'][1]],
replace=False).tolist()
if n_choose < n_runs:
selectedGenes = np.reshape(subGenelists, -1)
subGenelists.append(np.setdiff1d(NN_genes, selectedGenes).tolist())
corrMatrix = 1 - np.abs(pd.DataFrame(np.corrcoef(df_to_impute.T),
index=NN_genes, columns=NN_genes)[predictors])
in_out_genes = get_input_genes(df_to_impute,self.NN_params['dims'],distanceMatrix=corrMatrix,
targets=subGenelists,predictorLimit=self.predictorLimit)
n_cells = df_to_impute.shape[0]
if type(cell_subset) is float or cell_subset == 1:
n_cells = int(cell_subset * n_cells)
elif type(cell_subset) is int:
n_cells = cell_subset
self.trainCells = df_to_impute.sample(n_cells,replace=False).index
print('Starting training with {} cells ({:.1%}) on {} threads ({} cores/thread).'.
format(n_cells, 1.*n_cells/df_to_impute.shape[0], n_cores, self.NN_params['n_cores']))
df_to_impute = self.norm.fit(df_to_impute).transform(df_to_impute)
idx, cols = self.trainCells, df_to_impute.columns
trainData = df_to_impute.loc[self.trainCells, :].values
childJobs = [(in_out, self.NN_params, (idx, cols), 'train')
for in_out in in_out_genes]
output_dicts = self.runOnMultipleCores(n_cores, trainData.flatten(), childJobs)
self.networks = []
for dictionnary in output_dicts:
self.networks.append(Net(**dictionnary))
return self
def runOnMultipleCores(self, cores, data, childJobs):
sharedArray = sharedctypes.RawArray('d', data)
pool = Pool(processes=cores, initializer=newCoreInitializer, initargs=(sharedArray,))
output_dicts = pool.map(trainOrPredict, childJobs)
pool.close()
pool.join()
return output_dicts
def predict(self, data, imputed_only=False, restore_pos_values=True):
df = pd.DataFrame(data)
idx, cols = df.index, df.columns
df_norm = self.norm.fit(df).transform(df).values.flatten()
childJobs = [((12, 15), net.__dict__, (idx, cols), 'predict')
for net in self.networks]
output_dicts = self.runOnMultipleCores(self.maxcores, df_norm, childJobs)
Y_imputed = pd.concat(output_dicts, axis=1)
Y_not_imputed = df[[gene for gene in df.columns if gene not in Y_imputed.columns]]
Y_total = self.norm.transform(pd.concat([Y_imputed, Y_not_imputed], axis=1)[df.columns],
rev=True)
if restore_pos_values:
Y_total = Y_total.mask(df>0,df)
if imputed_only:
Y_total = Y_total[Y_imputed.columns]
if type(data) == type(pd.DataFrame()):
return Y_total
else:
return Y_total.values
def score(self, data, metric=r2_score):
imputedGenes = list(zip(*[ net.targetGenes for net in self.networks ]))
return score_model(self,pd.DataFrame(data),metric=r2_score, cols=imputedGenes)
| true
| true
|
f70447f40f7fdf7642cec82e378beb85149aa765
| 8,476
|
py
|
Python
|
dynamic_dynamodb/config/command_line_parser.py
|
ponprathip/dynamic-dynamodb
|
f0968215f606b9ff464fc4b633f01df60a8745b2
|
[
"Apache-2.0"
] | null | null | null |
dynamic_dynamodb/config/command_line_parser.py
|
ponprathip/dynamic-dynamodb
|
f0968215f606b9ff464fc4b633f01df60a8745b2
|
[
"Apache-2.0"
] | null | null | null |
dynamic_dynamodb/config/command_line_parser.py
|
ponprathip/dynamic-dynamodb
|
f0968215f606b9ff464fc4b633f01df60a8745b2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Command line configuration parser """
import sys
import os.path
import argparse
import configparser
def parse():
""" Parse command line options """
parser = argparse.ArgumentParser(
description='Dynamic DynamoDB - Auto provisioning AWS DynamoDB')
parser.add_argument(
'-c', '--config',
help='Read configuration from a configuration file')
parser.add_argument(
'--dry-run',
action='store_true',
help='Run without making any changes to your DynamoDB table')
parser.add_argument(
'--run-once',
action='store_true',
help='Run once and then exit Dynamic DynamoDB, instead of looping')
parser.add_argument(
'--show-config',
action='store_true',
help='Parse config files, print parsed data and then exit Dynamic DynamoDB')
parser.add_argument(
'--check-interval',
type=int,
help="""How many seconds should we wait between
the checks (default: 300)""")
parser.add_argument(
'--log-file',
help='Send output to the given log file')
parser.add_argument(
'--log-level',
choices=['debug', 'info', 'warning', 'error'],
help='Log level to use (default: info)')
parser.add_argument(
'--log-config-file',
help=(
'Use a custom Python logging configuration file. Overrides both '
'--log-level and --log-file.'
))
parser.add_argument(
'--version',
action='store_true',
help='Print current version number')
parser.add_argument(
'--aws-access-key-id',
help="Override Boto configuration with the following AWS access key")
parser.add_argument(
'--aws-secret-access-key',
help="Override Boto configuration with the following AWS secret key")
daemon_ag = parser.add_argument_group('Daemon options')
daemon_ag.add_argument(
'--daemon',
help=(
'Run Dynamic DynamoDB in daemon mode. Valid modes are '
'[start|stop|restart|foreground]'))
daemon_ag.add_argument(
'--instance',
default='default',
help=(
'Name of the Dynamic DynamoDB instance. '
'Used to run multiple instances of Dynamic DynamoDB. '
'Give each instance a unique name and control them separately '
'with the --daemon flag. (default: default)'))
daemon_ag.add_argument(
'--pid-file-dir',
default='/tmp',
help='Directory where pid file is located in. Defaults to /tmp')
dynamodb_ag = parser.add_argument_group('DynamoDB options')
dynamodb_ag.add_argument(
'-r', '--region',
help='AWS region to operate in (default: us-east-1')
dynamodb_ag.add_argument(
'-t', '--table-name',
help=(
'Table(s) to target. '
'The name is treated as a regular expression. '
'E.g. "^my_table.*$" or "my_table"'))
r_scaling_ag = parser.add_argument_group('Read units scaling properties')
r_scaling_ag.add_argument(
'--reads-upper-threshold',
type=int,
help="""Scale up the reads with --increase-reads-with if
the currently consumed read units reaches this many
percent (default: 90)""")
r_scaling_ag.add_argument(
'--throttled-reads-upper-threshold',
type=int,
help="""Scale up the reads with --increase-reads-with if
the count of throttled read events exceeds this
count (default: 0)""")
r_scaling_ag.add_argument(
'--reads-lower-threshold',
type=int,
help="""Scale down the reads with --decrease-reads-with if the
currently consumed read units is as low as this
percentage (default: 30)""")
r_scaling_ag.add_argument(
'--increase-reads-with',
type=int,
help="""How much should we increase the read units with?
(default: 50, max: 100 if --increase-reads-unit = percent)""")
r_scaling_ag.add_argument(
'--decrease-reads-with',
type=int,
help="""How much should we decrease the read units with?
(default: 50)""")
r_scaling_ag.add_argument(
'--increase-reads-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
r_scaling_ag.add_argument(
'--decrease-reads-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
r_scaling_ag.add_argument(
'--min-provisioned-reads',
type=int,
help="""Minimum number of provisioned reads""")
r_scaling_ag.add_argument(
'--max-provisioned-reads',
type=int,
help="""Maximum number of provisioned reads""")
r_scaling_ag.add_argument(
'--num-read-checks-before-scale-down',
type=int,
help="""Number of consecutive checks that must meet criteria
before a scale down event occurs""")
r_scaling_ag.add_argument(
'--num-read-checks-reset-percent',
type=int,
help="""Percentage Value that will cause the num_read_checks_before
scale_down var to reset back to 0""")
w_scaling_ag = parser.add_argument_group('Write units scaling properties')
w_scaling_ag.add_argument(
'--writes-upper-threshold',
type=int,
help="""Scale up the writes with --increase-writes-with
if the currently consumed write units reaches this
many percent (default: 90)""")
w_scaling_ag.add_argument(
'--throttled-writes-upper-threshold',
type=int,
help="""Scale up the reads with --increase-writes-with if
the count of throttled write events exceeds this
count (default: 0)""")
w_scaling_ag.add_argument(
'--writes-lower-threshold',
type=int,
help="""Scale down the writes with --decrease-writes-with
if the currently consumed write units is as low as this
percentage (default: 30)""")
w_scaling_ag.add_argument(
'--increase-writes-with',
type=int,
help="""How much should we increase the write units with?
(default: 50,
max: 100 if --increase-writes-unit = 'percent')""")
w_scaling_ag.add_argument(
'--decrease-writes-with',
type=int,
help="""How much should we decrease the write units with?
(default: 50)""")
w_scaling_ag.add_argument(
'--increase-writes-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
w_scaling_ag.add_argument(
'--decrease-writes-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
w_scaling_ag.add_argument(
'--min-provisioned-writes',
type=int,
help="""Minimum number of provisioned writes""")
w_scaling_ag.add_argument(
'--max-provisioned-writes',
type=int,
help="""Maximum number of provisioned writes""")
w_scaling_ag.add_argument(
'--num-write-checks-before-scale-down',
type=int,
help="""Number of consecutive checks that must meet criteria
before a scale down event occurs""")
w_scaling_ag.add_argument(
'--num-write-checks-reset-percent',
type=int,
help="""Percentage Value that will cause the num_write_checks_before
scale_down var to reset back to 0""")
args = parser.parse_args()
# Print the version and quit
if args.version:
# Read the dynamic-dynamodb.conf configuration file
internal_config_file = configparser.RawConfigParser()
internal_config_file.optionxform = lambda option: option
internal_config_file.read(
os.path.abspath(
os.path.join(
os.path.dirname(__file__), '../dynamic-dynamodb.conf')))
print('Dynamic DynamoDB version: {0}'.format(
internal_config_file.get('general', 'version')))
sys.exit(0)
# Replace any new values in the configuration
configuration = {}
for arg in args.__dict__:
if args.__dict__.get(arg) is not None:
configuration[arg] = args.__dict__.get(arg)
return configuration
| 38.880734
| 84
| 0.607126
|
import sys
import os.path
import argparse
import configparser
def parse():
parser = argparse.ArgumentParser(
description='Dynamic DynamoDB - Auto provisioning AWS DynamoDB')
parser.add_argument(
'-c', '--config',
help='Read configuration from a configuration file')
parser.add_argument(
'--dry-run',
action='store_true',
help='Run without making any changes to your DynamoDB table')
parser.add_argument(
'--run-once',
action='store_true',
help='Run once and then exit Dynamic DynamoDB, instead of looping')
parser.add_argument(
'--show-config',
action='store_true',
help='Parse config files, print parsed data and then exit Dynamic DynamoDB')
parser.add_argument(
'--check-interval',
type=int,
help="""How many seconds should we wait between
the checks (default: 300)""")
parser.add_argument(
'--log-file',
help='Send output to the given log file')
parser.add_argument(
'--log-level',
choices=['debug', 'info', 'warning', 'error'],
help='Log level to use (default: info)')
parser.add_argument(
'--log-config-file',
help=(
'Use a custom Python logging configuration file. Overrides both '
'--log-level and --log-file.'
))
parser.add_argument(
'--version',
action='store_true',
help='Print current version number')
parser.add_argument(
'--aws-access-key-id',
help="Override Boto configuration with the following AWS access key")
parser.add_argument(
'--aws-secret-access-key',
help="Override Boto configuration with the following AWS secret key")
daemon_ag = parser.add_argument_group('Daemon options')
daemon_ag.add_argument(
'--daemon',
help=(
'Run Dynamic DynamoDB in daemon mode. Valid modes are '
'[start|stop|restart|foreground]'))
daemon_ag.add_argument(
'--instance',
default='default',
help=(
'Name of the Dynamic DynamoDB instance. '
'Used to run multiple instances of Dynamic DynamoDB. '
'Give each instance a unique name and control them separately '
'with the --daemon flag. (default: default)'))
daemon_ag.add_argument(
'--pid-file-dir',
default='/tmp',
help='Directory where pid file is located in. Defaults to /tmp')
dynamodb_ag = parser.add_argument_group('DynamoDB options')
dynamodb_ag.add_argument(
'-r', '--region',
help='AWS region to operate in (default: us-east-1')
dynamodb_ag.add_argument(
'-t', '--table-name',
help=(
'Table(s) to target. '
'The name is treated as a regular expression. '
'E.g. "^my_table.*$" or "my_table"'))
r_scaling_ag = parser.add_argument_group('Read units scaling properties')
r_scaling_ag.add_argument(
'--reads-upper-threshold',
type=int,
help="""Scale up the reads with --increase-reads-with if
the currently consumed read units reaches this many
percent (default: 90)""")
r_scaling_ag.add_argument(
'--throttled-reads-upper-threshold',
type=int,
help="""Scale up the reads with --increase-reads-with if
the count of throttled read events exceeds this
count (default: 0)""")
r_scaling_ag.add_argument(
'--reads-lower-threshold',
type=int,
help="""Scale down the reads with --decrease-reads-with if the
currently consumed read units is as low as this
percentage (default: 30)""")
r_scaling_ag.add_argument(
'--increase-reads-with',
type=int,
help="""How much should we increase the read units with?
(default: 50, max: 100 if --increase-reads-unit = percent)""")
r_scaling_ag.add_argument(
'--decrease-reads-with',
type=int,
help="""How much should we decrease the read units with?
(default: 50)""")
r_scaling_ag.add_argument(
'--increase-reads-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
r_scaling_ag.add_argument(
'--decrease-reads-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
r_scaling_ag.add_argument(
'--min-provisioned-reads',
type=int,
help="""Minimum number of provisioned reads""")
r_scaling_ag.add_argument(
'--max-provisioned-reads',
type=int,
help="""Maximum number of provisioned reads""")
r_scaling_ag.add_argument(
'--num-read-checks-before-scale-down',
type=int,
help="""Number of consecutive checks that must meet criteria
before a scale down event occurs""")
r_scaling_ag.add_argument(
'--num-read-checks-reset-percent',
type=int,
help="""Percentage Value that will cause the num_read_checks_before
scale_down var to reset back to 0""")
w_scaling_ag = parser.add_argument_group('Write units scaling properties')
w_scaling_ag.add_argument(
'--writes-upper-threshold',
type=int,
help="""Scale up the writes with --increase-writes-with
if the currently consumed write units reaches this
many percent (default: 90)""")
w_scaling_ag.add_argument(
'--throttled-writes-upper-threshold',
type=int,
help="""Scale up the reads with --increase-writes-with if
the count of throttled write events exceeds this
count (default: 0)""")
w_scaling_ag.add_argument(
'--writes-lower-threshold',
type=int,
help="""Scale down the writes with --decrease-writes-with
if the currently consumed write units is as low as this
percentage (default: 30)""")
w_scaling_ag.add_argument(
'--increase-writes-with',
type=int,
help="""How much should we increase the write units with?
(default: 50,
max: 100 if --increase-writes-unit = 'percent')""")
w_scaling_ag.add_argument(
'--decrease-writes-with',
type=int,
help="""How much should we decrease the write units with?
(default: 50)""")
w_scaling_ag.add_argument(
'--increase-writes-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
w_scaling_ag.add_argument(
'--decrease-writes-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
w_scaling_ag.add_argument(
'--min-provisioned-writes',
type=int,
help="""Minimum number of provisioned writes""")
w_scaling_ag.add_argument(
'--max-provisioned-writes',
type=int,
help="""Maximum number of provisioned writes""")
w_scaling_ag.add_argument(
'--num-write-checks-before-scale-down',
type=int,
help="""Number of consecutive checks that must meet criteria
before a scale down event occurs""")
w_scaling_ag.add_argument(
'--num-write-checks-reset-percent',
type=int,
help="""Percentage Value that will cause the num_write_checks_before
scale_down var to reset back to 0""")
args = parser.parse_args()
if args.version:
internal_config_file = configparser.RawConfigParser()
internal_config_file.optionxform = lambda option: option
internal_config_file.read(
os.path.abspath(
os.path.join(
os.path.dirname(__file__), '../dynamic-dynamodb.conf')))
print('Dynamic DynamoDB version: {0}'.format(
internal_config_file.get('general', 'version')))
sys.exit(0)
configuration = {}
for arg in args.__dict__:
if args.__dict__.get(arg) is not None:
configuration[arg] = args.__dict__.get(arg)
return configuration
| true
| true
|
f7044a279a20984e104ff69ddf76ab1cc5fa13be
| 4,076
|
py
|
Python
|
tvizbase/rpc_client.py
|
inov8ru/thallid-viz
|
302a44f8af257edad8a5d11be19fc423fe51b89c
|
[
"MIT"
] | 3
|
2019-09-27T15:21:14.000Z
|
2019-10-24T15:13:50.000Z
|
tvizbase/rpc_client.py
|
inov8ru/thallid-viz
|
302a44f8af257edad8a5d11be19fc423fe51b89c
|
[
"MIT"
] | null | null | null |
tvizbase/rpc_client.py
|
inov8ru/thallid-viz
|
302a44f8af257edad8a5d11be19fc423fe51b89c
|
[
"MIT"
] | 1
|
2022-02-12T16:27:05.000Z
|
2022-02-12T16:27:05.000Z
|
# -*- coding: utf-8 -*-
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
import json
from time import sleep, time
from pprint import pprint
from itertools import cycle
from .storage import nodes, api_total
#from .proxy import Proxy
class Http():
http = Session()
proxies = None
class RpcClient(Http):
RPS_DELAY = 0.10 # ~3 requests per second
last_request = 0.0
""" Simple Steem JSON-RPC API
This class serves as an abstraction layer for easy use of the Steem API.
rpc = RpcClient(nodes=nodes) or rpc = RpcClient()
Args:
nodes (list): A list of Steem HTTP RPC nodes to connect to.
any call available to that port can be issued using the instance
rpc.call('command', *parameters)
"""
headers = {'User-Agent': 'thallid', 'content-type': 'application/json'}
def __init__(self, report=False, **kwargs):
self.api_total = api_total
self.report = report
self.PROXY = kwargs.get("PROXY", False)
if self.PROXY: self.proxies = Proxy()
self.nodes = cycle(kwargs.get("nodes", nodes)) # Перебор нод
self.url = next(self.nodes)
self.num_retries = kwargs.get("num_retries", 3) # Количество попыток подключения к ноде
adapter = HTTPAdapter(max_retries=self.num_retries)
for node in nodes:
self.http.mount(node, adapter)
def get_response(self, payload):
data = json.dumps(payload, ensure_ascii=False).encode('utf8')
while True:
n = 1
proxies = self.proxies.get_http() if self.PROXY else None
while n < self.num_retries:
try:
# Ограничение по запросам в секунду
delay = self.RPS_DELAY - (time() - self.last_request)
if delay > 0: sleep(delay)
#response = self.http.post(self.url, data=data, headers=self.headers, proxies=proxies, auth=auth)
response = self.http.post(self.url, data=data, headers=self.headers, proxies=proxies, timeout=30)
self.last_request = time()
if response.status_code == 503:
proxies = self.proxies.new_http() if self.PROXY else None # next proxy
print('new proxy', proxies)
else:
return response
#except ConnectionError as ce:
except:
#print('ce', ce)
sleeptime = (n - 1) * 2
if self.report:
print("Lost connection to node during rpcconnect(): %s (%d/%d) " % (self.url, n, self.num_retries))
print("Retrying in %d seconds" % sleeptime)
sleep(sleeptime)
n += 1
self.url = next(self.nodes) # next node
print("Trying to connect to node %s" % self.url, 'error in get_response rpc_client', proxies)
return False
def call(self, name, *params, **kwargs):
# Определяем для name своё api
api = self.api_total[name]
#method = kwargs.get('method', 'condenser_api.') #steem
method = kwargs.get('method', 'call')
parameters = kwargs.get('params', [api, name, params])
#payload = {"method": method + name, "params": parameters, "id": 1, "jsonrpc": '2.0'} #steem
payload = {"method": method, "params": parameters, "id": 1, "jsonrpc": '2.0'}
result = None
n = 1
while n < self.num_retries:
response = self.get_response(payload)
if response:
if response.status_code == 200:
try:
res = response.json()
if 'error' in res:
if self.report:
#pprint(res["error"]["message"])
print('ERROR IN RES', res["error"]["message"])
else:
result = res["result"]
break
except:
print('ERROR JSON', response)
#elif response.status_code == 503:
# proxies = self.proxies.new_http() if self.PROXY else None # next proxy
# print('new proxy', proxies)
else:
if self.report:
print(n, 'ERROR status_code', response.status_code, response.text)
else:
print('not connection to node', self.url)
print('response', response)
n += 1
self.url = next(self.nodes) # next node
sleep(n * 2)
print("Trying to connect to node %s" % self.url, 'for method', name)
return result
#----- main -----
if __name__ == '__main__':
pass
| 27.540541
| 105
| 0.648921
|
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
import json
from time import sleep, time
from pprint import pprint
from itertools import cycle
from .storage import nodes, api_total
class Http():
http = Session()
proxies = None
class RpcClient(Http):
RPS_DELAY = 0.10 last_request = 0.0
headers = {'User-Agent': 'thallid', 'content-type': 'application/json'}
def __init__(self, report=False, **kwargs):
self.api_total = api_total
self.report = report
self.PROXY = kwargs.get("PROXY", False)
if self.PROXY: self.proxies = Proxy()
self.nodes = cycle(kwargs.get("nodes", nodes)) self.url = next(self.nodes)
self.num_retries = kwargs.get("num_retries", 3) adapter = HTTPAdapter(max_retries=self.num_retries)
for node in nodes:
self.http.mount(node, adapter)
def get_response(self, payload):
data = json.dumps(payload, ensure_ascii=False).encode('utf8')
while True:
n = 1
proxies = self.proxies.get_http() if self.PROXY else None
while n < self.num_retries:
try:
delay = self.RPS_DELAY - (time() - self.last_request)
if delay > 0: sleep(delay)
response = self.http.post(self.url, data=data, headers=self.headers, proxies=proxies, timeout=30)
self.last_request = time()
if response.status_code == 503:
proxies = self.proxies.new_http() if self.PROXY else None print('new proxy', proxies)
else:
return response
except:
sleeptime = (n - 1) * 2
if self.report:
print("Lost connection to node during rpcconnect(): %s (%d/%d) " % (self.url, n, self.num_retries))
print("Retrying in %d seconds" % sleeptime)
sleep(sleeptime)
n += 1
self.url = next(self.nodes) print("Trying to connect to node %s" % self.url, 'error in get_response rpc_client', proxies)
return False
def call(self, name, *params, **kwargs):
api = self.api_total[name]
method = kwargs.get('method', 'call')
parameters = kwargs.get('params', [api, name, params])
payload = {"method": method, "params": parameters, "id": 1, "jsonrpc": '2.0'}
result = None
n = 1
while n < self.num_retries:
response = self.get_response(payload)
if response:
if response.status_code == 200:
try:
res = response.json()
if 'error' in res:
if self.report:
print('ERROR IN RES', res["error"]["message"])
else:
result = res["result"]
break
except:
print('ERROR JSON', response)
else:
if self.report:
print(n, 'ERROR status_code', response.status_code, response.text)
else:
print('not connection to node', self.url)
print('response', response)
n += 1
self.url = next(self.nodes) sleep(n * 2)
print("Trying to connect to node %s" % self.url, 'for method', name)
return result
if __name__ == '__main__':
pass
| true
| true
|
f7044a71ed7e9f453f633fd06fffede821afd456
| 3,600
|
py
|
Python
|
tests/integration/projects/general/service.py
|
DrizzlingCattus/BentoML
|
3ca0cc134c72d92e2e806113df1677e38f2567e0
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/projects/general/service.py
|
DrizzlingCattus/BentoML
|
3ca0cc134c72d92e2e806113df1677e38f2567e0
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/projects/general/service.py
|
DrizzlingCattus/BentoML
|
3ca0cc134c72d92e2e806113df1677e38f2567e0
|
[
"Apache-2.0"
] | null | null | null |
import json
import pathlib
import sys
import time
from typing import Sequence
import bentoml
from bentoml.adapters import (
DataframeInput,
FileInput,
ImageInput,
JsonInput,
MultiImageInput,
)
from bentoml.frameworks.sklearn import SklearnModelArtifact
from bentoml.handlers import DataframeHandler # deprecated
from bentoml.service.artifacts.pickle import PickleArtifact
from bentoml.types import InferenceResult, InferenceTask
@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([PickleArtifact("model"), SklearnModelArtifact('sk_model')])
class ExampleService(bentoml.BentoService):
"""
Example BentoService class made for testing purpose
"""
@bentoml.api(
input=DataframeInput(dtype={"col1": "int"}),
mb_max_latency=1000,
mb_max_batch_size=2000,
batch=True,
)
def predict_dataframe(self, df):
return self.artifacts.model.predict_dataframe(df)
@bentoml.api(DataframeHandler, dtype={"col1": "int"}, batch=True) # deprecated
def predict_dataframe_v1(self, df):
return self.artifacts.model.predict_dataframe(df)
@bentoml.api(
input=MultiImageInput(input_names=('original', 'compared')), batch=True
)
def predict_multi_images(self, originals, compareds):
return self.artifacts.model.predict_multi_images(originals, compareds)
@bentoml.api(input=ImageInput(), batch=True)
def predict_image(self, images):
return self.artifacts.model.predict_image(images)
@bentoml.api(
input=JsonInput(), mb_max_latency=1000, mb_max_batch_size=2000, batch=True,
)
def predict_with_sklearn(self, jsons):
return self.artifacts.sk_model.predict(jsons)
@bentoml.api(input=FileInput(), batch=True)
def predict_file(self, files):
return self.artifacts.model.predict_file(files)
@bentoml.api(input=JsonInput(), batch=True)
def predict_json(self, input_datas):
return self.artifacts.model.predict_json(input_datas)
@bentoml.api(input=JsonInput(), batch=True)
def predict_strict_json(self, input_datas, tasks: Sequence[InferenceTask] = None):
filtered_jsons = []
for j, t in zip(input_datas, tasks):
if t.http_headers.content_type != "application/json":
t.discard(http_status=400, err_msg="application/json only")
else:
filtered_jsons.append(j)
return self.artifacts.model.predict_json(filtered_jsons)
@bentoml.api(input=JsonInput(), batch=True)
def predict_direct_json(self, input_datas, tasks: Sequence[InferenceTask] = None):
filtered_jsons = []
for j, t in zip(input_datas, tasks):
if t.http_headers.content_type != "application/json":
t.discard(http_status=400, err_msg="application/json only")
else:
filtered_jsons.append(j)
rets = self.artifacts.model.predict_json(filtered_jsons)
return [
InferenceResult(http_status=200, data=json.dumps(result)) for result in rets
]
@bentoml.api(input=JsonInput(), mb_max_latency=10000 * 1000, batch=True)
def echo_with_delay(self, input_datas):
data = input_datas[0]
time.sleep(data['b'] + data['a'] * len(input_datas))
return input_datas
if __name__ == "__main__":
artifacts_path = sys.argv[1]
bento_dist_path = sys.argv[2]
service = ExampleService()
service.artifacts.load_all(artifacts_path)
pathlib.Path(bento_dist_path).mkdir(parents=True, exist_ok=True)
service.save_to_dir(bento_dist_path)
| 34.951456
| 88
| 0.695
|
import json
import pathlib
import sys
import time
from typing import Sequence
import bentoml
from bentoml.adapters import (
DataframeInput,
FileInput,
ImageInput,
JsonInput,
MultiImageInput,
)
from bentoml.frameworks.sklearn import SklearnModelArtifact
from bentoml.handlers import DataframeHandler from bentoml.service.artifacts.pickle import PickleArtifact
from bentoml.types import InferenceResult, InferenceTask
@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([PickleArtifact("model"), SklearnModelArtifact('sk_model')])
class ExampleService(bentoml.BentoService):
@bentoml.api(
input=DataframeInput(dtype={"col1": "int"}),
mb_max_latency=1000,
mb_max_batch_size=2000,
batch=True,
)
def predict_dataframe(self, df):
return self.artifacts.model.predict_dataframe(df)
@bentoml.api(DataframeHandler, dtype={"col1": "int"}, batch=True) def predict_dataframe_v1(self, df):
return self.artifacts.model.predict_dataframe(df)
@bentoml.api(
input=MultiImageInput(input_names=('original', 'compared')), batch=True
)
def predict_multi_images(self, originals, compareds):
return self.artifacts.model.predict_multi_images(originals, compareds)
@bentoml.api(input=ImageInput(), batch=True)
def predict_image(self, images):
return self.artifacts.model.predict_image(images)
@bentoml.api(
input=JsonInput(), mb_max_latency=1000, mb_max_batch_size=2000, batch=True,
)
def predict_with_sklearn(self, jsons):
return self.artifacts.sk_model.predict(jsons)
@bentoml.api(input=FileInput(), batch=True)
def predict_file(self, files):
return self.artifacts.model.predict_file(files)
@bentoml.api(input=JsonInput(), batch=True)
def predict_json(self, input_datas):
return self.artifacts.model.predict_json(input_datas)
@bentoml.api(input=JsonInput(), batch=True)
def predict_strict_json(self, input_datas, tasks: Sequence[InferenceTask] = None):
filtered_jsons = []
for j, t in zip(input_datas, tasks):
if t.http_headers.content_type != "application/json":
t.discard(http_status=400, err_msg="application/json only")
else:
filtered_jsons.append(j)
return self.artifacts.model.predict_json(filtered_jsons)
@bentoml.api(input=JsonInput(), batch=True)
def predict_direct_json(self, input_datas, tasks: Sequence[InferenceTask] = None):
filtered_jsons = []
for j, t in zip(input_datas, tasks):
if t.http_headers.content_type != "application/json":
t.discard(http_status=400, err_msg="application/json only")
else:
filtered_jsons.append(j)
rets = self.artifacts.model.predict_json(filtered_jsons)
return [
InferenceResult(http_status=200, data=json.dumps(result)) for result in rets
]
    @bentoml.api(input=JsonInput(), mb_max_latency=10000 * 1000, batch=True)
    def echo_with_delay(self, input_datas):
        """Echo the batch back after sleeping.
        The delay is driven by the first payload: 'b' + 'a' * batch size
        seconds — presumably used to exercise micro-batching timing
        (note the huge mb_max_latency) — confirm against the test suite.
        """
        data = input_datas[0]
        time.sleep(data['b'] + data['a'] * len(input_datas))
        return input_datas
if __name__ == "__main__":
    # Usage: <script> <artifacts dir> <output bundle dir>
    artifacts_path = sys.argv[1]
    bento_dist_path = sys.argv[2]
    service = ExampleService()
    # Load previously saved artifacts, then write a distributable bundle.
    service.artifacts.load_all(artifacts_path)
    pathlib.Path(bento_dist_path).mkdir(parents=True, exist_ok=True)
    service.save_to_dir(bento_dist_path)
| true
| true
|
f7044b4832232e3b67064f3a65d9a30b120554fc
| 236
|
py
|
Python
|
scripts/make_gifs.py
|
bchao1/stereo-magnification
|
031376675430a459f4bde768eb5c652f1d22a0a4
|
[
"Apache-2.0"
] | null | null | null |
scripts/make_gifs.py
|
bchao1/stereo-magnification
|
031376675430a459f4bde768eb5c652f1d22a0a4
|
[
"Apache-2.0"
] | null | null | null |
scripts/make_gifs.py
|
bchao1/stereo-magnification
|
031376675430a459f4bde768eb5c652f1d22a0a4
|
[
"Apache-2.0"
] | null | null | null |
from PIL import Image

# Assemble the nine rendered frames into a looping GIF (100 ms per frame).
frames = [
    Image.open(f"../examples/lf/results/render_0{i}_{i}.0.png") for i in range(9)
]
first, *rest = frames
first.save(
    "../examples/lf/out.gif",
    save_all=True,
    append_images=rest,
    duration=100,
    loop=0,
)
| 39.333333
| 103
| 0.699153
|
from PIL import Image
images = []
for i in range(9):
images.append(Image.open(f"../examples/lf/results/render_0{i}_{i}.0.png"))
images[0].save("../examples/lf/out.gif", save_all=True, append_images=images[1:], duration=100, loop=0)
| true
| true
|
f7044ca3e16964f42ff76f78419427012e7f0013
| 8,104
|
py
|
Python
|
src/waldur_vmware/models.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 26
|
2017-10-18T13:49:58.000Z
|
2021-09-19T04:44:09.000Z
|
src/waldur_vmware/models.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 14
|
2018-12-10T14:14:51.000Z
|
2021-06-07T10:33:39.000Z
|
src/waldur_vmware/models.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 32
|
2017-09-24T03:10:45.000Z
|
2021-10-16T16:41:09.000Z
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import FieldTracker
from waldur_core.core import models as core_models
from waldur_core.structure import models as structure_models
class VirtualMachineMixin(models.Model):
    """Abstract mixin with the hardware profile shared by VirtualMachine
    and Template: guest OS type, CPU topology, RAM and disk sizes (MiB)."""
    class Meta:
        abstract = True
    guest_os = models.CharField(
        max_length=50,
        help_text=_(
            'Defines the valid guest operating system '
            'types used for configuring a virtual machine'
        ),
    )
    cores = models.PositiveSmallIntegerField(
        default=0, help_text=_('Number of cores in a VM')
    )
    cores_per_socket = models.PositiveSmallIntegerField(
        default=1, help_text=_('Number of cores per socket in a VM')
    )
    ram = models.PositiveIntegerField(
        default=0, help_text=_('Memory size in MiB'), verbose_name=_('RAM')
    )
    disk = models.PositiveIntegerField(default=0, help_text=_('Disk size in MiB'))
class VirtualMachine(
    VirtualMachineMixin, core_models.RuntimeStateMixin, structure_models.BaseResource
):
    """VMware virtual machine resource.
    Combines the hardware profile from VirtualMachineMixin with runtime
    state tracking and placement references (template, cluster, datastore,
    folder, networks) that are nulled out when the referenced object is
    deleted (on_delete=SET_NULL).
    """
    class RuntimeStates:
        # Power state of the VM container itself.
        POWERED_OFF = 'POWERED_OFF'
        POWERED_ON = 'POWERED_ON'
        SUSPENDED = 'SUSPENDED'
        CHOICES = (
            (POWERED_OFF, 'Powered off'),
            (POWERED_ON, 'Powered on'),
            (SUSPENDED, 'Suspended'),
        )
    class GuestPowerStates:
        # Power state of the guest OS, as opposed to the VM container.
        RUNNING = 'RUNNING'
        SHUTTING_DOWN = 'SHUTTING_DOWN'
        RESETTING = 'RESETTING'
        STANDBY = 'STANDBY'
        NOT_RUNNING = 'NOT_RUNNING'
        UNAVAILABLE = 'UNAVAILABLE'
        CHOICES = (
            (RUNNING, 'Running'),
            (SHUTTING_DOWN, 'Shutting down'),
            (RESETTING, 'Resetting'),
            (STANDBY, 'Standby'),
            (NOT_RUNNING, 'Not running'),
            (UNAVAILABLE, 'Unavailable'),
        )
    class ToolsStates:
        # Status of VMware Tools inside the guest OS.
        STARTING = 'STARTING'
        RUNNING = 'RUNNING'
        NOT_RUNNING = 'NOT_RUNNING'
        CHOICES = (
            (STARTING, 'Starting'),
            (RUNNING, 'Running'),
            (NOT_RUNNING, 'Not running'),
        )
    template = models.ForeignKey('Template', null=True, on_delete=models.SET_NULL)
    cluster = models.ForeignKey('Cluster', null=True, on_delete=models.SET_NULL)
    datastore = models.ForeignKey('Datastore', null=True, on_delete=models.SET_NULL)
    folder = models.ForeignKey('Folder', null=True, on_delete=models.SET_NULL)
    networks = models.ManyToManyField('Network', blank=True)
    guest_power_enabled = models.BooleanField(
        default=False,
        help_text='Flag indicating if the virtual machine is ready to process soft power operations.',
    )
    # NOTE(review): the first positional CharField argument is verbose_name,
    # so the long sentences below act as field labels, not help_text —
    # confirm that was the intent.
    guest_power_state = models.CharField(
        'The power state of the guest operating system.',
        max_length=150,
        blank=True,
        choices=GuestPowerStates.CHOICES,
    )
    tools_installed = models.BooleanField(default=False)
    tools_state = models.CharField(
        'Current running status of VMware Tools running in the guest operating system.',
        max_length=50,
        blank=True,
        choices=ToolsStates.CHOICES,
    )
    tracker = FieldTracker()
    @classmethod
    def get_backend_fields(cls):
        # Presumably the fields refreshed during backend synchronization,
        # on top of those declared by BaseResource — confirm in structure app.
        return super(VirtualMachine, cls).get_backend_fields() + (
            'runtime_state',
            'cores',
            'cores_per_socket',
            'ram',
            'disk',
            'tools_installed',
            'tools_state',
        )
    @classmethod
    def get_url_name(cls):
        return 'vmware-virtual-machine'
    @property
    def total_disk(self):
        # Sum of attached Disk.size values (MiB); 0 when no disks exist.
        return self.disks.aggregate(models.Sum('size'))['size__sum'] or 0
    def __str__(self):
        return self.name
class Port(core_models.RuntimeStateMixin, structure_models.BaseResource):
    """Virtual network adapter connecting a VirtualMachine to a Network."""
    vm = models.ForeignKey(on_delete=models.CASCADE, to=VirtualMachine)
    network = models.ForeignKey(on_delete=models.CASCADE, to='Network')
    mac_address = models.CharField(
        max_length=32, blank=True, verbose_name=_('MAC address')
    )
    @classmethod
    def get_backend_fields(cls):
        # name and mac_address are refreshed from the backend in addition
        # to the base resource fields.
        return super(Port, cls).get_backend_fields() + ('name', 'mac_address')
    @classmethod
    def get_url_name(cls):
        return 'vmware-port'
    def __str__(self):
        return self.name
class Disk(structure_models.BaseResource):
    """Virtual disk attached to a VirtualMachine (size in MiB).
    Accessible from the VM as vm.disks (see VirtualMachine.total_disk)."""
    size = models.PositiveIntegerField(help_text=_('Size in MiB'))
    vm = models.ForeignKey(
        on_delete=models.CASCADE, to=VirtualMachine, related_name='disks'
    )
    @classmethod
    def get_url_name(cls):
        return 'vmware-disk'
    def __str__(self):
        return self.name
    @classmethod
    def get_backend_fields(cls):
        # name and size are refreshed from the backend.
        return super(Disk, cls).get_backend_fields() + ('name', 'size')
class Template(
    VirtualMachineMixin, core_models.DescribableMixin, structure_models.ServiceProperty
):
    """VM template carrying the same hardware profile fields as a VM."""
    # No auto_now/auto_now_add: timestamps are assigned explicitly,
    # presumably copied from the backend during sync — confirm.
    created = models.DateTimeField()
    modified = models.DateTimeField()
    @classmethod
    def get_url_name(cls):
        return 'vmware-template'
    def __str__(self):
        return self.name
class Cluster(structure_models.ServiceProperty):
    """VMware compute cluster a VM can be placed on."""
    @classmethod
    def get_url_name(cls):
        return 'vmware-cluster'
    def __str__(self):
        # Prefix with the service settings to disambiguate same-named
        # clusters from different services.
        return '%s / %s' % (self.settings, self.name)
class CustomerCluster(models.Model):
    """Customer-to-Cluster link; presumably restricts which clusters a
    customer may provision to (cf. CustomerNetwork below) — confirm."""
    customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
    cluster = models.ForeignKey('Cluster', on_delete=models.CASCADE)
    def __str__(self):
        return '%s / %s' % (self.customer, self.cluster)
    class Meta:
        unique_together = ('customer', 'cluster')
class Network(structure_models.ServiceProperty):
    """VMware network available for VM NIC attachment."""
    # Plain string; the set of valid values is not constrained here.
    type = models.CharField(max_length=255)
    @classmethod
    def get_url_name(cls):
        return 'vmware-network'
    def __str__(self):
        return '%s / %s' % (self.settings, self.name)
class CustomerNetwork(models.Model):
    # Restricts which networks a customer may choose when provisioning a new VM.
    customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
    network = models.ForeignKey('Network', on_delete=models.CASCADE)
    def __str__(self):
        return '%s / %s' % (self.customer, self.network)
    class Meta:
        unique_together = ('customer', 'network')
class CustomerNetworkPair(models.Model):
    # Restricts which networks a customer may attach as NICs to an existing VM
    # (distinct from CustomerNetwork, which governs new-VM provisioning).
    customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
    network = models.ForeignKey('Network', on_delete=models.CASCADE)
    def __str__(self):
        return '%s / %s' % (self.customer, self.network)
    class Meta:
        unique_together = ('customer', 'network')
class Datastore(structure_models.ServiceProperty):
    """VMware datastore with capacity accounting (MB)."""
    # Plain string; the set of valid values is not constrained here.
    type = models.CharField(max_length=255)
    capacity = models.PositiveIntegerField(
        help_text="Capacity, in MB.", null=True, blank=True
    )
    free_space = models.PositiveIntegerField(
        help_text="Available space, in MB.", null=True, blank=True
    )
    @classmethod
    def get_url_name(cls):
        return 'vmware-datastore'
    def __str__(self):
        return '%s / %s' % (self.settings, self.name)
class CustomerDatastore(models.Model):
    """Customer-to-Datastore link; presumably restricts which datastores a
    customer may use (cf. CustomerNetwork) — confirm."""
    customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
    datastore = models.ForeignKey('Datastore', on_delete=models.CASCADE)
    def __str__(self):
        return '%s / %s' % (self.customer, self.datastore)
    class Meta:
        unique_together = ('customer', 'datastore')
class Folder(structure_models.ServiceProperty):
    """VMware inventory folder a VM can be placed in."""
    def __str__(self):
        return '%s / %s' % (self.settings, self.name)
    @classmethod
    def get_url_name(cls):
        return 'vmware-folder'
class CustomerFolder(models.Model):
    """Customer-to-Folder link; presumably restricts which folders a
    customer may place VMs in (cf. CustomerNetwork) — confirm."""
    customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
    folder = models.ForeignKey('Folder', on_delete=models.CASCADE)
    def __str__(self):
        return '%s / %s' % (self.customer, self.folder)
    class Meta:
        unique_together = ('customer', 'folder')
| 29.576642
| 102
| 0.659427
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import FieldTracker
from waldur_core.core import models as core_models
from waldur_core.structure import models as structure_models
class VirtualMachineMixin(models.Model):
class Meta:
abstract = True
guest_os = models.CharField(
max_length=50,
help_text=_(
'Defines the valid guest operating system '
'types used for configuring a virtual machine'
),
)
cores = models.PositiveSmallIntegerField(
default=0, help_text=_('Number of cores in a VM')
)
cores_per_socket = models.PositiveSmallIntegerField(
default=1, help_text=_('Number of cores per socket in a VM')
)
ram = models.PositiveIntegerField(
default=0, help_text=_('Memory size in MiB'), verbose_name=_('RAM')
)
disk = models.PositiveIntegerField(default=0, help_text=_('Disk size in MiB'))
class VirtualMachine(
VirtualMachineMixin, core_models.RuntimeStateMixin, structure_models.BaseResource
):
class RuntimeStates:
POWERED_OFF = 'POWERED_OFF'
POWERED_ON = 'POWERED_ON'
SUSPENDED = 'SUSPENDED'
CHOICES = (
(POWERED_OFF, 'Powered off'),
(POWERED_ON, 'Powered on'),
(SUSPENDED, 'Suspended'),
)
class GuestPowerStates:
RUNNING = 'RUNNING'
SHUTTING_DOWN = 'SHUTTING_DOWN'
RESETTING = 'RESETTING'
STANDBY = 'STANDBY'
NOT_RUNNING = 'NOT_RUNNING'
UNAVAILABLE = 'UNAVAILABLE'
CHOICES = (
(RUNNING, 'Running'),
(SHUTTING_DOWN, 'Shutting down'),
(RESETTING, 'Resetting'),
(STANDBY, 'Standby'),
(NOT_RUNNING, 'Not running'),
(UNAVAILABLE, 'Unavailable'),
)
class ToolsStates:
STARTING = 'STARTING'
RUNNING = 'RUNNING'
NOT_RUNNING = 'NOT_RUNNING'
CHOICES = (
(STARTING, 'Starting'),
(RUNNING, 'Running'),
(NOT_RUNNING, 'Not running'),
)
template = models.ForeignKey('Template', null=True, on_delete=models.SET_NULL)
cluster = models.ForeignKey('Cluster', null=True, on_delete=models.SET_NULL)
datastore = models.ForeignKey('Datastore', null=True, on_delete=models.SET_NULL)
folder = models.ForeignKey('Folder', null=True, on_delete=models.SET_NULL)
networks = models.ManyToManyField('Network', blank=True)
guest_power_enabled = models.BooleanField(
default=False,
help_text='Flag indicating if the virtual machine is ready to process soft power operations.',
)
guest_power_state = models.CharField(
'The power state of the guest operating system.',
max_length=150,
blank=True,
choices=GuestPowerStates.CHOICES,
)
tools_installed = models.BooleanField(default=False)
tools_state = models.CharField(
'Current running status of VMware Tools running in the guest operating system.',
max_length=50,
blank=True,
choices=ToolsStates.CHOICES,
)
tracker = FieldTracker()
@classmethod
def get_backend_fields(cls):
return super(VirtualMachine, cls).get_backend_fields() + (
'runtime_state',
'cores',
'cores_per_socket',
'ram',
'disk',
'tools_installed',
'tools_state',
)
@classmethod
def get_url_name(cls):
return 'vmware-virtual-machine'
@property
def total_disk(self):
return self.disks.aggregate(models.Sum('size'))['size__sum'] or 0
def __str__(self):
return self.name
class Port(core_models.RuntimeStateMixin, structure_models.BaseResource):
vm = models.ForeignKey(on_delete=models.CASCADE, to=VirtualMachine)
network = models.ForeignKey(on_delete=models.CASCADE, to='Network')
mac_address = models.CharField(
max_length=32, blank=True, verbose_name=_('MAC address')
)
@classmethod
def get_backend_fields(cls):
return super(Port, cls).get_backend_fields() + ('name', 'mac_address')
@classmethod
def get_url_name(cls):
return 'vmware-port'
def __str__(self):
return self.name
class Disk(structure_models.BaseResource):
size = models.PositiveIntegerField(help_text=_('Size in MiB'))
vm = models.ForeignKey(
on_delete=models.CASCADE, to=VirtualMachine, related_name='disks'
)
@classmethod
def get_url_name(cls):
return 'vmware-disk'
def __str__(self):
return self.name
@classmethod
def get_backend_fields(cls):
return super(Disk, cls).get_backend_fields() + ('name', 'size')
class Template(
VirtualMachineMixin, core_models.DescribableMixin, structure_models.ServiceProperty
):
created = models.DateTimeField()
modified = models.DateTimeField()
@classmethod
def get_url_name(cls):
return 'vmware-template'
def __str__(self):
return self.name
class Cluster(structure_models.ServiceProperty):
@classmethod
def get_url_name(cls):
return 'vmware-cluster'
def __str__(self):
return '%s / %s' % (self.settings, self.name)
class CustomerCluster(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
cluster = models.ForeignKey('Cluster', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.cluster)
class Meta:
unique_together = ('customer', 'cluster')
class Network(structure_models.ServiceProperty):
type = models.CharField(max_length=255)
@classmethod
def get_url_name(cls):
return 'vmware-network'
def __str__(self):
return '%s / %s' % (self.settings, self.name)
class CustomerNetwork(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
network = models.ForeignKey('Network', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.network)
class Meta:
unique_together = ('customer', 'network')
class CustomerNetworkPair(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
network = models.ForeignKey('Network', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.network)
class Meta:
unique_together = ('customer', 'network')
class Datastore(structure_models.ServiceProperty):
type = models.CharField(max_length=255)
capacity = models.PositiveIntegerField(
help_text="Capacity, in MB.", null=True, blank=True
)
free_space = models.PositiveIntegerField(
help_text="Available space, in MB.", null=True, blank=True
)
@classmethod
def get_url_name(cls):
return 'vmware-datastore'
def __str__(self):
return '%s / %s' % (self.settings, self.name)
class CustomerDatastore(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
datastore = models.ForeignKey('Datastore', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.datastore)
class Meta:
unique_together = ('customer', 'datastore')
class Folder(structure_models.ServiceProperty):
def __str__(self):
return '%s / %s' % (self.settings, self.name)
@classmethod
def get_url_name(cls):
return 'vmware-folder'
class CustomerFolder(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
folder = models.ForeignKey('Folder', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.folder)
class Meta:
unique_together = ('customer', 'folder')
| true
| true
|
f7044cad6db8570c9fda70bb4a54726e8a97dc08
| 321
|
py
|
Python
|
log_metrics/__init__.py
|
simpleenergy/log-metrics
|
af91bfecc2a6f39ee26a2e394e3782495ffc98b1
|
[
"Apache-2.0"
] | 5
|
2015-09-23T23:15:37.000Z
|
2017-11-27T06:43:54.000Z
|
log_metrics/__init__.py
|
simpleenergy/log-metrics
|
af91bfecc2a6f39ee26a2e394e3782495ffc98b1
|
[
"Apache-2.0"
] | 1
|
2015-02-07T23:26:32.000Z
|
2015-02-07T23:26:32.000Z
|
log_metrics/__init__.py
|
simpleenergy/log-metrics
|
af91bfecc2a6f39ee26a2e394e3782495ffc98b1
|
[
"Apache-2.0"
] | 2
|
2015-01-19T05:58:43.000Z
|
2018-07-30T17:24:57.000Z
|
# -*- coding: utf-8 -*-
# Meta
__version__ = "0.0.4"
__author__ = 'Rhys Elsmore'
__email__ = 'me@rhys.io'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2014 Rhys Elsmore'
# Module Namespace
from .core import MetricsLogger, GroupMetricsLogger
from .api import timer, increment, sample, measure, unique, group
| 20.0625
| 65
| 0.725857
|
__version__ = "0.0.4"
__author__ = 'Rhys Elsmore'
__email__ = 'me@rhys.io'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2014 Rhys Elsmore'
from .core import MetricsLogger, GroupMetricsLogger
from .api import timer, increment, sample, measure, unique, group
| true
| true
|
f7044d3c0712db7cd4bb71dcc732d7614526c066
| 1,423
|
bzl
|
Python
|
proto/workspace.bzl
|
Yannic/rules_proto
|
4a4b83abfbfe018387a5b58986efa888850048c4
|
[
"Apache-2.0"
] | 5
|
2019-06-13T19:03:50.000Z
|
2019-08-07T14:23:52.000Z
|
proto/workspace.bzl
|
Yannic/rules_proto
|
4a4b83abfbfe018387a5b58986efa888850048c4
|
[
"Apache-2.0"
] | 5
|
2019-06-18T11:44:50.000Z
|
2019-06-24T14:05:35.000Z
|
proto/workspace.bzl
|
Yannic/rules_proto
|
4a4b83abfbfe018387a5b58986efa888850048c4
|
[
"Apache-2.0"
] | 1
|
2019-06-19T21:50:23.000Z
|
2019-06-19T21:50:23.000Z
|
## Copyright 2019 The Rules Protobuf Authors. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
load("//proto:repositories.bzl", "rules_proto_dependencies")
load("//proto:repositories.bzl", "rules_proto_toolchains")
## Warning template for the deprecated wrapper macros below;
## {old_rule}/{new_rule} are substituted via .format().
_DEPRECATED_REPOSITORY_RULE_MESSAGE = " ".join([
    "{old_rule}() is deprecated.",
    "Please import @build_bazel_rules_proto//proto:repositories.bzl and use {new_rule}().",
    "See https://github.com/Yannic/rules_proto/issues/6",
])
def proto_import_dependencies():
    """Deprecated alias: prints a warning, then calls rules_proto_dependencies()."""
    print(_DEPRECATED_REPOSITORY_RULE_MESSAGE.format(
        old_rule = "proto_import_dependencies",
        new_rule = "rules_proto_dependencies",
    ))
    rules_proto_dependencies()
def proto_register_toolchains():
    """Deprecated alias: prints a warning, then calls rules_proto_toolchains()."""
    print(_DEPRECATED_REPOSITORY_RULE_MESSAGE.format(
        old_rule = "proto_register_toolchains",
        new_rule = "rules_proto_toolchains",
    ))
    rules_proto_toolchains()
| 36.487179
| 91
| 0.740689
|
load("//proto:repositories.bzl", "rules_proto_dependencies")
load("//proto:repositories.bzl", "rules_proto_toolchains")
_DEPRECATED_REPOSITORY_RULE_MESSAGE = " ".join([
"{old_rule}() is deprecated.",
"Please import @build_bazel_rules_proto//proto:repositories.bzl and use {new_rule}().",
"See https://github.com/Yannic/rules_proto/issues/6",
])
def proto_import_dependencies():
print(_DEPRECATED_REPOSITORY_RULE_MESSAGE.format(
old_rule = "proto_import_dependencies",
new_rule = "rules_proto_dependencies",
))
rules_proto_dependencies()
def proto_register_toolchains():
print(_DEPRECATED_REPOSITORY_RULE_MESSAGE.format(
old_rule = "proto_register_toolchains",
new_rule = "rules_proto_toolchains",
))
rules_proto_toolchains()
| true
| true
|
f7044f1d26e519871cd3864ab2531d33132e654e
| 21,972
|
py
|
Python
|
test/python/test_tensor.py
|
XinChCh/singa
|
93fd9da72694e68bfe3fb29d0183a65263d238a1
|
[
"Apache-2.0"
] | 2,354
|
2015-05-05T03:01:56.000Z
|
2019-10-22T15:08:11.000Z
|
test/python/test_tensor.py
|
Dadaguaibuhaoyisi/singa
|
93fd9da72694e68bfe3fb29d0183a65263d238a1
|
[
"Apache-2.0"
] | 332
|
2019-10-24T15:06:32.000Z
|
2022-03-07T06:22:32.000Z
|
test/python/test_tensor.py
|
Dadaguaibuhaoyisi/singa
|
93fd9da72694e68bfe3fb29d0183a65263d238a1
|
[
"Apache-2.0"
] | 607
|
2015-05-03T14:09:05.000Z
|
2019-10-21T09:49:21.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# =============================================================================
from __future__ import division
import math
import unittest
import random
import numpy as np
from singa import tensor
from singa import singa_wrap as singa_api
from singa import autograd
from cuda_helper import gpu_dev, cpu_dev
class TestTensorMethods(unittest.TestCase):
    def setUp(self):
        # Two zero-filled 2x3 singa tensors shared by the arithmetic tests.
        self.shape = (2, 3)
        self.t = tensor.Tensor(self.shape)
        self.s = tensor.Tensor(self.shape)
        self.t.set_value(0)
        self.s.set_value(0)
def test_tensor_fields(self):
t = self.t
shape = self.shape
self.assertTupleEqual(t.shape, shape)
self.assertEqual(t.shape[0], shape[0])
self.assertEqual(t.shape[1], shape[1])
self.assertEqual(tensor.product(shape), 2 * 3)
self.assertEqual(t.ndim(), 2)
self.assertEqual(t.size(), 2 * 3)
self.assertEqual(t.memsize(), 2 * 3 * tensor.sizeof(tensor.float32))
self.assertFalse(t.is_transpose())
def test_unary_operators(self):
t = self.t
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 0.0)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
t -= 0.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23 - 0.23)
t *= 2.5
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5)
t /= 2
self.assertAlmostEqual(
tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5 / 2)
def test_binary_operators(self):
t = self.t
t += 3.2
s = self.s
s += 2.1
a = t + s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 + 2.1, 5)
a = t - s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 - 2.1, 5)
a = t * s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 * 2.1, 5)
''' not implemented yet
a = t / s
self.assertAlmostEqual(tensor.to_numpy(a)[0,0], 3.2/2.1, 5)
'''
def test_comparison_operators(self):
t = self.t
t += 3.45
a = t < 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t <= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t > 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t >= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t == 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.lt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.le(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.gt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.ge(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.eq(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
    def test_tensor_copy(self):
        """copy() shares underlying storage with the source (later updates
        are visible through the copy), while deepcopy() snapshots values —
        as pinned by the assertions after the second `t += 1.23` below."""
        t = tensor.Tensor((2, 3))
        t += 1.23
        self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
        tc = t.copy()
        tdc = t.deepcopy()
        self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 1.23)
        self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
        t += 1.23
        self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 2.46)
        # tc tracks t's mutation (shared storage); tdc keeps the old value.
        self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 2.46)
        self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
def test_copy_data(self):
t = self.t
t += 1.23
s = self.s
s += 5.43
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tensor.copy_data_to_from(t, s, 2)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 1], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 2], 1.23)
def test_global_method(self):
t = self.t
t += 12.34
a = tensor.log(t)
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], math.log(12.34))
def test_random(self):
x = tensor.Tensor((1000,))
x.gaussian(1, 0.01)
self.assertAlmostEqual(tensor.average(x), 1, 3)
def test_radd(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 + x
self.assertEqual(tensor.average(y), 2.)
def test_rsub(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 - x
self.assertEqual(tensor.average(y), 0.)
def test_rmul(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 * x
self.assertEqual(tensor.average(y), 2.)
def test_rdiv(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 / x
self.assertEqual(tensor.average(y), 2.)
def matmul_high_dim_helper(self, dev):
configs = [
[(1, 12, 7, 64), (1, 12, 64, 7)],
[(1, 7, 768), (768, 768)],
]
print()
for config in configs:
X = np.random.random(config[0]).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random(config[1]).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
y_t = np.matmul(X, W)
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), y_t, 3)
def test_matmul_high_dim_cpu(self):
self.matmul_high_dim_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_high_dim_gpu(self):
self.matmul_high_dim_helper(gpu_dev)
def test_tensor_inplace_api(self):
""" tensor inplace methods alter internal state and also return self
"""
x = tensor.Tensor((3,))
y = x.set_value(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.uniform(1, 2)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.bernoulli(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.gaussian(1, 2)
self.assertTrue(y is x)
def test_numpy_convert(self):
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.int)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0)
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.float32)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0.)
def test_transpose(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
A1 = np.transpose(a)
tA1 = tensor.transpose(ta)
TA1 = tensor.to_numpy(tA1)
A2 = np.transpose(a, [0, 2, 1])
tA2 = tensor.transpose(ta, [0, 2, 1])
TA2 = tensor.to_numpy(tA2)
np.testing.assert_array_almost_equal(TA1, A1)
np.testing.assert_array_almost_equal(TA2, A2)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_gpu_6d_transpose(self,dev=gpu_dev):
s0 = (2,3,4,5,6,7)
axes1=[5,4,3,2,1,0]
s1 = (2,7,6,5,4,3)
s2 = (2,4,3,5,7,6)
a = np.random.random(s1)
ta = tensor.from_numpy(a)
ta.to_device(dev)
ta = tensor.reshape(ta,s1)
ta = tensor.transpose(ta,axes1)
ta = tensor.reshape(ta,s2)
a = np.reshape(a,s1)
a = np.transpose(a,axes1)
a = np.reshape(a,s2)
np.testing.assert_array_almost_equal(tensor.to_numpy(ta), a)
def test_einsum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.einsum('kij,kij->kij', a, a)
tres1 = tensor.einsum('kij,kij->kij', ta, ta)
Tres1 = tensor.to_numpy(tres1)
res2 = np.einsum('kij,kih->kjh', a, a)
tres2 = tensor.einsum('kij,kih->kjh', ta, ta)
Tres2 = tensor.to_numpy(tres2)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
self.assertAlmostEqual(np.sum(Tres2 - res2), 0., places=3)
def test_repeat(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
ta_repeat1 = tensor.repeat(ta, 2, axis=None)
a_repeat1 = np.repeat(a, 2, axis=None)
Ta_repeat1 = tensor.to_numpy(ta_repeat1)
ta_repeat2 = tensor.repeat(ta, 4, axis=1)
a_repeat2 = np.repeat(a, 4, axis=1)
Ta_repeat2 = tensor.to_numpy(ta_repeat2)
self.assertAlmostEqual(np.sum(Ta_repeat1 - a_repeat1), 0., places=3)
self.assertAlmostEqual(np.sum(Ta_repeat2 - a_repeat2), 0., places=3)
def test_sum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
a_sum0 = np.sum(a)
ta_sum0 = tensor.sum(ta)
Ta_sum0 = tensor.to_numpy(ta_sum0)
a_sum1 = np.sum(a, axis=1)
ta_sum1 = tensor.sum(ta, axis=1)
Ta_sum1 = tensor.to_numpy(ta_sum1)
a_sum2 = np.sum(a, axis=2)
ta_sum2 = tensor.sum(ta, axis=2)
Ta_sum2 = tensor.to_numpy(ta_sum2)
self.assertAlmostEqual(np.sum(a_sum0 - Ta_sum0), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum1 - Ta_sum1), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum2 - Ta_sum2), 0., places=3)
def test_tensordot(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.tensordot(a, a, axes=1)
tres1 = tensor.tensordot(ta, ta, axes=1)
Tres1 = tensor.to_numpy(tres1)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
np.testing.assert_array_almost_equal(Tres1, res1)
res2 = np.tensordot(a, a, axes=([0, 1], [2, 1]))
tres2 = tensor.tensordot(ta, ta, axes=([0, 1], [2, 1]))
np.testing.assert_array_almost_equal(tensor.to_numpy(tres2), res2)
def test_reshape(self):
a = np.array([[[1.1, 1.1, 1.4], [1.1, 1.1, 1.1]],
[[1.1, 1.1, 1.3], [1.6, 1.1, 1.2]]])
ta = tensor.from_numpy(a)
tb = tensor.reshape(ta, [2, 6])
self.assertAlmostEqual(tb.shape[0], 2., places=3)
self.assertAlmostEqual(tb.shape[1], 6., places=3)
np.testing.assert_array_almost_equal(tensor.to_numpy(tb),
a.reshape((2, 6)))
def test_transpose_then_reshape(self):
a = np.array([[[1.1, 1.1], [1.1, 1.1], [1.4, 1.3]],
[[1.1, 1.6], [1.1, 1.1], [1.1, 1.2]]])
TRANSPOSE_AXES = (2, 0, 1)
RESHAPE_DIMS = (2, 6)
ta = tensor.from_numpy(a)
ta = ta.transpose(TRANSPOSE_AXES)
ta = ta.reshape(RESHAPE_DIMS)
np.testing.assert_array_almost_equal(
tensor.to_numpy(ta),
np.reshape(a.transpose(TRANSPOSE_AXES), RESHAPE_DIMS))
def _concatenate_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
np3 = np.concatenate((np1, np2), axis=3)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.Tensor(device=dev, data=np2)
t3 = tensor.concatenate((t1, t2), 3)
np.testing.assert_array_almost_equal(tensor.to_numpy(t3), np3)
def test_concatenate_cpu(self):
self._concatenate_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_concatenate_gpu(self):
self._concatenate_helper(gpu_dev)
def _subscription_helper(self, dev):
np1 = np.random.random((5, 5, 5, 5)).astype(np.float32)
sg_tensor = tensor.Tensor(device=dev, data=np1)
sg_tensor_ret = sg_tensor[1:3, :, 1:, :-1]
np.testing.assert_array_almost_equal((tensor.to_numpy(sg_tensor_ret)),
np1[1:3, :, 1:, :-1])
def test_subscription_cpu(self):
self._subscription_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_subscription_gpu(self):
self._subscription_helper(gpu_dev)
def _ceil_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np1 * 10
np2 = np.ceil(np1)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.ceil(t1)
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np2)
def test_ceil_cpu(self):
self._ceil_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_ceil_gpu(self):
self._ceil_helper(gpu_dev)
def _astype_helper(self, dev):
shape1 = [2, 3]
shape2 = [3, 2]
np_flt = np.random.random(shape1).astype(np.float32)
np_flt = np_flt * 10 - 5
np_int = np_flt.astype(np.int32)
np_flt2 = np_int.astype(np.float32)
t2 = tensor.Tensor(device=dev, data=np_flt)
t2 = t2.as_type('int')
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np_int)
t1 = t2.reshape(shape2)
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_int.reshape(shape2))
t1 = t1.as_type('float')
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_flt2.reshape(shape2))
def test_astype_cpu(self):
self._astype_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_astype_gpu(self):
self._astype_helper(gpu_dev)
def _3d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 3).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 5).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_3d_matmul_cpu(self):
self._3d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_3d_matmul_gpu(self):
self._3d_matmul_helper(gpu_dev)
def _4d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 256).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 1024).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_4d_matmul_cpu(self):
self._4d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_4d_matmul_gpu(self):
self._4d_matmul_helper(gpu_dev)
def _matmul_transpose_helper(self, dev):
X = np.random.random((1, 256, 12, 64)).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random((1, 256, 12, 64)).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
X = np.transpose(X, (0, 2, 1, 3))
W = np.transpose(W, (0, 2, 1, 3))
W = np.transpose(W, (0, 1, 3, 2))
Y = np.matmul(X, W)
x = autograd.transpose(x, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 1, 3, 2))
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(x), X)
np.testing.assert_array_almost_equal(tensor.to_numpy(w), W)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), Y)
def test_matmul_transpose_cpu(self):
self._matmul_transpose_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_transpose_gpu(self):
self._matmul_transpose_helper(gpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_gaussian_gpu(self, dev=gpu_dev):
x = tensor.Tensor((3, 5, 3, 5), device=dev)
x.gaussian(0, 1)
x = tensor.Tensor((4, 5, 3, 2), device=dev)
x.gaussian(0, 1)
def _kfloat32_int(self, dev=gpu_dev):
np.random.seed(0)
x_val = np.random.random((2, 3)).astype(np.float32) * 10
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = np.random.random((1,))[0] * 100
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kfloat32_int_gpu(self):
self._kfloat32_int(gpu_dev)
def test_kfloat32_int_cpu(self):
self._kfloat32_int(cpu_dev)
def _kint_float(self, dev=gpu_dev):
np.random.seed(0)
x_val = np.random.randint(0, 10, (2, 3))
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = random.random() * 100
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar, 5)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_float_gpu(self):
self._kint_float(gpu_dev)
def test_kint_float_cpu(self):
self._kint_float(cpu_dev)
def _kint_kint(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
[[-11, 9, 4, -15, 14], [18, 11, -1, -10, 10],
[-4, 12, 2, 9, 3], [7, 0, 17, 1, 4]],
[[18, -13, -12, 9, -11], [19, -4, -7, 19, 14],
[18, 9, -8, 19, -2], [8, 9, -1, 6, 9]]],
dtype=np.int32)
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_cpu(self, dev=cpu_dev):
self._kint_kint(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_gpu(self, dev=gpu_dev):
self._kint_kint(gpu_dev)
def _kint_kint_bc(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
dtype=np.int32)
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_bc_cpu(self, dev=cpu_dev):
self._kint_kint_bc(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_bc_gpu(self, dev=gpu_dev):
self._kint_kint_bc(gpu_dev)
if __name__ == '__main__':
unittest.main()
| 35.495961
| 83
| 0.563444
|
from __future__ import division
import math
import unittest
import random
import numpy as np
from singa import tensor
from singa import singa_wrap as singa_api
from singa import autograd
from cuda_helper import gpu_dev, cpu_dev
class TestTensorMethods(unittest.TestCase):
def setUp(self):
self.shape = (2, 3)
self.t = tensor.Tensor(self.shape)
self.s = tensor.Tensor(self.shape)
self.t.set_value(0)
self.s.set_value(0)
def test_tensor_fields(self):
t = self.t
shape = self.shape
self.assertTupleEqual(t.shape, shape)
self.assertEqual(t.shape[0], shape[0])
self.assertEqual(t.shape[1], shape[1])
self.assertEqual(tensor.product(shape), 2 * 3)
self.assertEqual(t.ndim(), 2)
self.assertEqual(t.size(), 2 * 3)
self.assertEqual(t.memsize(), 2 * 3 * tensor.sizeof(tensor.float32))
self.assertFalse(t.is_transpose())
def test_unary_operators(self):
t = self.t
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 0.0)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
t -= 0.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23 - 0.23)
t *= 2.5
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5)
t /= 2
self.assertAlmostEqual(
tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5 / 2)
def test_binary_operators(self):
t = self.t
t += 3.2
s = self.s
s += 2.1
a = t + s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 + 2.1, 5)
a = t - s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 - 2.1, 5)
a = t * s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 * 2.1, 5)
def test_comparison_operators(self):
t = self.t
t += 3.45
a = t < 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t <= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t > 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t >= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t == 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.lt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.le(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.gt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.ge(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.eq(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
def test_tensor_copy(self):
t = tensor.Tensor((2, 3))
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tc = t.copy()
tdc = t.deepcopy()
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 1.23)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
def test_copy_data(self):
t = self.t
t += 1.23
s = self.s
s += 5.43
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tensor.copy_data_to_from(t, s, 2)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 1], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 2], 1.23)
def test_global_method(self):
t = self.t
t += 12.34
a = tensor.log(t)
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], math.log(12.34))
def test_random(self):
x = tensor.Tensor((1000,))
x.gaussian(1, 0.01)
self.assertAlmostEqual(tensor.average(x), 1, 3)
def test_radd(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 + x
self.assertEqual(tensor.average(y), 2.)
def test_rsub(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 - x
self.assertEqual(tensor.average(y), 0.)
def test_rmul(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 * x
self.assertEqual(tensor.average(y), 2.)
def test_rdiv(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 / x
self.assertEqual(tensor.average(y), 2.)
def matmul_high_dim_helper(self, dev):
configs = [
[(1, 12, 7, 64), (1, 12, 64, 7)],
[(1, 7, 768), (768, 768)],
]
print()
for config in configs:
X = np.random.random(config[0]).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random(config[1]).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
y_t = np.matmul(X, W)
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), y_t, 3)
def test_matmul_high_dim_cpu(self):
self.matmul_high_dim_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_high_dim_gpu(self):
self.matmul_high_dim_helper(gpu_dev)
def test_tensor_inplace_api(self):
x = tensor.Tensor((3,))
y = x.set_value(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.uniform(1, 2)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.bernoulli(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.gaussian(1, 2)
self.assertTrue(y is x)
def test_numpy_convert(self):
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.int)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0)
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.float32)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0.)
def test_transpose(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
A1 = np.transpose(a)
tA1 = tensor.transpose(ta)
TA1 = tensor.to_numpy(tA1)
A2 = np.transpose(a, [0, 2, 1])
tA2 = tensor.transpose(ta, [0, 2, 1])
TA2 = tensor.to_numpy(tA2)
np.testing.assert_array_almost_equal(TA1, A1)
np.testing.assert_array_almost_equal(TA2, A2)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_gpu_6d_transpose(self,dev=gpu_dev):
s0 = (2,3,4,5,6,7)
axes1=[5,4,3,2,1,0]
s1 = (2,7,6,5,4,3)
s2 = (2,4,3,5,7,6)
a = np.random.random(s1)
ta = tensor.from_numpy(a)
ta.to_device(dev)
ta = tensor.reshape(ta,s1)
ta = tensor.transpose(ta,axes1)
ta = tensor.reshape(ta,s2)
a = np.reshape(a,s1)
a = np.transpose(a,axes1)
a = np.reshape(a,s2)
np.testing.assert_array_almost_equal(tensor.to_numpy(ta), a)
def test_einsum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.einsum('kij,kij->kij', a, a)
tres1 = tensor.einsum('kij,kij->kij', ta, ta)
Tres1 = tensor.to_numpy(tres1)
res2 = np.einsum('kij,kih->kjh', a, a)
tres2 = tensor.einsum('kij,kih->kjh', ta, ta)
Tres2 = tensor.to_numpy(tres2)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
self.assertAlmostEqual(np.sum(Tres2 - res2), 0., places=3)
def test_repeat(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
ta_repeat1 = tensor.repeat(ta, 2, axis=None)
a_repeat1 = np.repeat(a, 2, axis=None)
Ta_repeat1 = tensor.to_numpy(ta_repeat1)
ta_repeat2 = tensor.repeat(ta, 4, axis=1)
a_repeat2 = np.repeat(a, 4, axis=1)
Ta_repeat2 = tensor.to_numpy(ta_repeat2)
self.assertAlmostEqual(np.sum(Ta_repeat1 - a_repeat1), 0., places=3)
self.assertAlmostEqual(np.sum(Ta_repeat2 - a_repeat2), 0., places=3)
def test_sum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
a_sum0 = np.sum(a)
ta_sum0 = tensor.sum(ta)
Ta_sum0 = tensor.to_numpy(ta_sum0)
a_sum1 = np.sum(a, axis=1)
ta_sum1 = tensor.sum(ta, axis=1)
Ta_sum1 = tensor.to_numpy(ta_sum1)
a_sum2 = np.sum(a, axis=2)
ta_sum2 = tensor.sum(ta, axis=2)
Ta_sum2 = tensor.to_numpy(ta_sum2)
self.assertAlmostEqual(np.sum(a_sum0 - Ta_sum0), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum1 - Ta_sum1), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum2 - Ta_sum2), 0., places=3)
def test_tensordot(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.tensordot(a, a, axes=1)
tres1 = tensor.tensordot(ta, ta, axes=1)
Tres1 = tensor.to_numpy(tres1)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
np.testing.assert_array_almost_equal(Tres1, res1)
res2 = np.tensordot(a, a, axes=([0, 1], [2, 1]))
tres2 = tensor.tensordot(ta, ta, axes=([0, 1], [2, 1]))
np.testing.assert_array_almost_equal(tensor.to_numpy(tres2), res2)
def test_reshape(self):
a = np.array([[[1.1, 1.1, 1.4], [1.1, 1.1, 1.1]],
[[1.1, 1.1, 1.3], [1.6, 1.1, 1.2]]])
ta = tensor.from_numpy(a)
tb = tensor.reshape(ta, [2, 6])
self.assertAlmostEqual(tb.shape[0], 2., places=3)
self.assertAlmostEqual(tb.shape[1], 6., places=3)
np.testing.assert_array_almost_equal(tensor.to_numpy(tb),
a.reshape((2, 6)))
def test_transpose_then_reshape(self):
a = np.array([[[1.1, 1.1], [1.1, 1.1], [1.4, 1.3]],
[[1.1, 1.6], [1.1, 1.1], [1.1, 1.2]]])
TRANSPOSE_AXES = (2, 0, 1)
RESHAPE_DIMS = (2, 6)
ta = tensor.from_numpy(a)
ta = ta.transpose(TRANSPOSE_AXES)
ta = ta.reshape(RESHAPE_DIMS)
np.testing.assert_array_almost_equal(
tensor.to_numpy(ta),
np.reshape(a.transpose(TRANSPOSE_AXES), RESHAPE_DIMS))
def _concatenate_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
np3 = np.concatenate((np1, np2), axis=3)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.Tensor(device=dev, data=np2)
t3 = tensor.concatenate((t1, t2), 3)
np.testing.assert_array_almost_equal(tensor.to_numpy(t3), np3)
def test_concatenate_cpu(self):
self._concatenate_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_concatenate_gpu(self):
self._concatenate_helper(gpu_dev)
def _subscription_helper(self, dev):
np1 = np.random.random((5, 5, 5, 5)).astype(np.float32)
sg_tensor = tensor.Tensor(device=dev, data=np1)
sg_tensor_ret = sg_tensor[1:3, :, 1:, :-1]
np.testing.assert_array_almost_equal((tensor.to_numpy(sg_tensor_ret)),
np1[1:3, :, 1:, :-1])
def test_subscription_cpu(self):
self._subscription_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_subscription_gpu(self):
self._subscription_helper(gpu_dev)
def _ceil_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np1 * 10
np2 = np.ceil(np1)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.ceil(t1)
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np2)
def test_ceil_cpu(self):
self._ceil_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_ceil_gpu(self):
self._ceil_helper(gpu_dev)
def _astype_helper(self, dev):
shape1 = [2, 3]
shape2 = [3, 2]
np_flt = np.random.random(shape1).astype(np.float32)
np_flt = np_flt * 10 - 5
np_int = np_flt.astype(np.int32)
np_flt2 = np_int.astype(np.float32)
t2 = tensor.Tensor(device=dev, data=np_flt)
t2 = t2.as_type('int')
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np_int)
t1 = t2.reshape(shape2)
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_int.reshape(shape2))
t1 = t1.as_type('float')
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_flt2.reshape(shape2))
def test_astype_cpu(self):
self._astype_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_astype_gpu(self):
self._astype_helper(gpu_dev)
def _3d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 3).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 5).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_3d_matmul_cpu(self):
self._3d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_3d_matmul_gpu(self):
self._3d_matmul_helper(gpu_dev)
def _4d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 256).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 1024).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_4d_matmul_cpu(self):
self._4d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_4d_matmul_gpu(self):
self._4d_matmul_helper(gpu_dev)
def _matmul_transpose_helper(self, dev):
X = np.random.random((1, 256, 12, 64)).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random((1, 256, 12, 64)).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
X = np.transpose(X, (0, 2, 1, 3))
W = np.transpose(W, (0, 2, 1, 3))
W = np.transpose(W, (0, 1, 3, 2))
Y = np.matmul(X, W)
x = autograd.transpose(x, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 1, 3, 2))
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(x), X)
np.testing.assert_array_almost_equal(tensor.to_numpy(w), W)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), Y)
def test_matmul_transpose_cpu(self):
self._matmul_transpose_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_transpose_gpu(self):
self._matmul_transpose_helper(gpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_gaussian_gpu(self, dev=gpu_dev):
x = tensor.Tensor((3, 5, 3, 5), device=dev)
x.gaussian(0, 1)
x = tensor.Tensor((4, 5, 3, 2), device=dev)
x.gaussian(0, 1)
def _kfloat32_int(self, dev=gpu_dev):
np.random.seed(0)
x_val = np.random.random((2, 3)).astype(np.float32) * 10
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = np.random.random((1,))[0] * 100
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kfloat32_int_gpu(self):
self._kfloat32_int(gpu_dev)
def test_kfloat32_int_cpu(self):
self._kfloat32_int(cpu_dev)
def _kint_float(self, dev=gpu_dev):
np.random.seed(0)
x_val = np.random.randint(0, 10, (2, 3))
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = random.random() * 100
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar, 5)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_float_gpu(self):
self._kint_float(gpu_dev)
def test_kint_float_cpu(self):
self._kint_float(cpu_dev)
def _kint_kint(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
[[-11, 9, 4, -15, 14], [18, 11, -1, -10, 10],
[-4, 12, 2, 9, 3], [7, 0, 17, 1, 4]],
[[18, -13, -12, 9, -11], [19, -4, -7, 19, 14],
[18, 9, -8, 19, -2], [8, 9, -1, 6, 9]]],
dtype=np.int32)
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_cpu(self, dev=cpu_dev):
self._kint_kint(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_gpu(self, dev=gpu_dev):
self._kint_kint(gpu_dev)
def _kint_kint_bc(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
dtype=np.int32)
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_bc_cpu(self, dev=cpu_dev):
self._kint_kint_bc(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_bc_gpu(self, dev=gpu_dev):
self._kint_kint_bc(gpu_dev)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f7044ff308e002990bf93fd6f412f89dff8bcf34
| 4,580
|
py
|
Python
|
authors/apps/articles/tests/endpoints/test_get.py
|
andela/ah-jumanji-
|
a304718929936dd4a759d737fb3570d6cc25fb76
|
[
"BSD-3-Clause"
] | 1
|
2018-12-23T15:31:54.000Z
|
2018-12-23T15:31:54.000Z
|
authors/apps/articles/tests/endpoints/test_get.py
|
andela/ah-jumanji-
|
a304718929936dd4a759d737fb3570d6cc25fb76
|
[
"BSD-3-Clause"
] | 26
|
2018-11-27T09:13:15.000Z
|
2021-06-10T20:58:57.000Z
|
authors/apps/articles/tests/endpoints/test_get.py
|
andela/ah-jumanji-
|
a304718929936dd4a759d737fb3570d6cc25fb76
|
[
"BSD-3-Clause"
] | 2
|
2019-01-10T22:14:28.000Z
|
2019-11-04T07:33:43.000Z
|
import json
from rest_framework.test import APITestCase
from django.urls import reverse
from rest_framework import status
from django.contrib.auth import get_user_model
from authors.apps.articles.models import Articles
from authors.apps.profiles.models import Profile
class TestGetEndpoint(APITestCase):
def setUp(self):
""" Prepares table for tests """
self.token = self.get_user_token()
self.slug = "life_love_death"
self.title = "Life Love and Death"
self.description = "What is life?"
self.body = "This is the real life body."
self.tagList = "life,love,death"
self.author = 'TestAuthor'
self.article = Articles(
slug=self.slug,
title=self.title,
description=self.description,
body=self.body,
tagList=self.tagList,
author=Profile.objects.get(username=self.author))
self.article.save()
def test_get_all_articles(self):
"""
This tests getting all articles successfully
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_successfully_not_getting_articles_if_token_not_used(self):
"""
Unauthorized error returned if no token is passed in
"""
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_get_article_id(self):
"""
Tests the pk of the article is true
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertIn(b"1", response.content)
def test_articles_are_paginated(self):
"""
This tests if the returned articles are paginated
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
# this checks the number of articles in the database
self.assertIn(b"1", response.content)
# next is null since there is only one article posted
self.assertIn(b"null", response.content)
# previous is null since only one article has been posted
# the page_size holds ten articles per page
self.assertIn(b"null", response.content) # previous
def test_get_specific_article(self):
"""
This gets a specific article
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articleSpecific', kwargs={'slug': 'life_love_death'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_getting_and_checking_articles_content(self):
"""
This checks if the right content of an article is returned
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
# checks if the body passed during posting is the one returned
self.assertIn(b"This is the real life body.", response.content)
# checks if id returned is 1
self.assertIn(b"1", response.content)
def test_wrong_request(self):
"""
Checks request for a non existing article
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse(
'articleSpecific', kwargs={
'slug': 'life_love_death_live'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response.render()
self.assertIn(b"Article does not exist", response.content)
def get_user_token(self):
user = {
"user": {
"username": "TestAuthor",
"email": "test_user@email.com",
"password": "test123user#Password"
}
}
response = self.client.post(
reverse('register'), data=user, format='json')
user = get_user_model()
user = user.objects.get(username="TestAuthor")
user.is_active = True
user.save()
response.render()
data = response.content
token = json.loads(data.decode('utf-8'))['user']['token']
return token
| 35.230769
| 76
| 0.627293
|
import json
from rest_framework.test import APITestCase
from django.urls import reverse
from rest_framework import status
from django.contrib.auth import get_user_model
from authors.apps.articles.models import Articles
from authors.apps.profiles.models import Profile
class TestGetEndpoint(APITestCase):
def setUp(self):
self.token = self.get_user_token()
self.slug = "life_love_death"
self.title = "Life Love and Death"
self.description = "What is life?"
self.body = "This is the real life body."
self.tagList = "life,love,death"
self.author = 'TestAuthor'
self.article = Articles(
slug=self.slug,
title=self.title,
description=self.description,
body=self.body,
tagList=self.tagList,
author=Profile.objects.get(username=self.author))
self.article.save()
def test_get_all_articles(self):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_successfully_not_getting_articles_if_token_not_used(self):
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_get_article_id(self):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertIn(b"1", response.content)
def test_articles_are_paginated(self):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
self.assertIn(b"1", response.content)
self.assertIn(b"null", response.content)
self.assertIn(b"null", response.content)
def test_get_specific_article(self):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articleSpecific', kwargs={'slug': 'life_love_death'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_getting_and_checking_articles_content(self):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
self.assertIn(b"This is the real life body.", response.content)
self.assertIn(b"1", response.content)
def test_wrong_request(self):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse(
'articleSpecific', kwargs={
'slug': 'life_love_death_live'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response.render()
self.assertIn(b"Article does not exist", response.content)
def get_user_token(self):
user = {
"user": {
"username": "TestAuthor",
"email": "test_user@email.com",
"password": "test123user#Password"
}
}
response = self.client.post(
reverse('register'), data=user, format='json')
user = get_user_model()
user = user.objects.get(username="TestAuthor")
user.is_active = True
user.save()
response.render()
data = response.content
token = json.loads(data.decode('utf-8'))['user']['token']
return token
| true
| true
|
f70450e5817c83cfcb1a4c26acddb49086b5df92
| 566
|
py
|
Python
|
utilities/ResourceManager.py
|
sanazb/datastories-semeval2017-task4
|
c752620e3d694a1c5bcd444db8cf3e5ed5cd6651
|
[
"MIT"
] | 218
|
2017-05-15T13:36:34.000Z
|
2021-11-07T06:38:39.000Z
|
utilities/ResourceManager.py
|
sanazb/datastories-semeval2017-task4
|
c752620e3d694a1c5bcd444db8cf3e5ed5cd6651
|
[
"MIT"
] | 14
|
2017-07-24T07:45:58.000Z
|
2019-11-02T09:22:37.000Z
|
utilities/ResourceManager.py
|
sanazb/datastories-semeval2017-task4
|
c752620e3d694a1c5bcd444db8cf3e5ed5cd6651
|
[
"MIT"
] | 70
|
2017-05-12T08:06:56.000Z
|
2022-03-21T14:07:52.000Z
|
from abc import ABCMeta, abstractmethod
from frozendict import frozendict
class ResourceManager(metaclass=ABCMeta):
def __init__(self):
self.wv_filename = ""
self.parsed_filename = ""
@abstractmethod
def write(self):
"""
parse the raw file/files and write the data to disk
:return:
"""
pass
@abstractmethod
def read(self):
"""
read the parsed file from disk
:return:
"""
pass
def read_hashable(self):
return frozendict(self.read())
| 19.517241
| 59
| 0.583039
|
from abc import ABCMeta, abstractmethod
from frozendict import frozendict
class ResourceManager(metaclass=ABCMeta):
def __init__(self):
self.wv_filename = ""
self.parsed_filename = ""
@abstractmethod
def write(self):
pass
@abstractmethod
def read(self):
pass
def read_hashable(self):
return frozendict(self.read())
| true
| true
|
f70451dd563a66eff92f232ce71a939630bab2d2
| 22,696
|
py
|
Python
|
ssn_dataset.py
|
hyperfraise/action-detection
|
a3ee263ed701ed251cd0a79830ef796889ff366e
|
[
"BSD-3-Clause"
] | 1
|
2020-02-12T09:30:23.000Z
|
2020-02-12T09:30:23.000Z
|
ssn_dataset.py
|
hyperfraise/action-detection
|
a3ee263ed701ed251cd0a79830ef796889ff366e
|
[
"BSD-3-Clause"
] | null | null | null |
ssn_dataset.py
|
hyperfraise/action-detection
|
a3ee263ed701ed251cd0a79830ef796889ff366e
|
[
"BSD-3-Clause"
] | null | null | null |
import torch.utils.data as data
import os
import os.path
from numpy.random import randint
from ops.io import load_proposal_file
from transforms import *
from ops.utils import temporal_iou
class SSNInstance:
def __init__(
self,
start_frame,
end_frame,
video_frame_count,
fps=1,
label=None,
best_iou=None,
overlap_self=None,
):
self.start_frame = start_frame
self.end_frame = min(end_frame, video_frame_count)
self._label = label
self.fps = fps
self.coverage = (end_frame - start_frame) / video_frame_count
self.best_iou = best_iou
self.overlap_self = overlap_self
self.loc_reg = None
self.size_reg = None
def compute_regression_targets(self, gt_list, fg_thresh):
if self.best_iou < fg_thresh:
# background proposals do not need this
return
# find the groundtruth instance with the highest IOU
ious = [
temporal_iou(
(self.start_frame, self.end_frame), (gt.start_frame, gt.end_frame)
)
for gt in gt_list
]
best_gt_id = np.argmax(ious)
best_gt = gt_list[best_gt_id]
prop_center = (self.start_frame + self.end_frame) / 2
gt_center = (best_gt.start_frame + best_gt.end_frame) / 2
prop_size = self.end_frame - self.start_frame + 1
gt_size = best_gt.end_frame - best_gt.start_frame + 1
# get regression target:
# (1). center shift propotional to the proposal duration
# (2). logarithm of the groundtruth duration over proposal duraiton
self.loc_reg = (gt_center - prop_center) / prop_size
try:
self.size_reg = math.log(gt_size / prop_size)
except:
print((gt_size, prop_size, self.start_frame, self.end_frame))
raise
@property
def start_time(self):
return self.start_frame / self.fps
@property
def end_time(self):
return self.end_frame / self.fps
@property
def label(self):
return self._label if self._label is not None else -1
@property
def regression_targets(self):
return [self.loc_reg, self.size_reg] if self.loc_reg is not None else [0, 0]
class SSNVideoRecord:
def __init__(self, prop_record):
self._data = prop_record
frame_count = int(self._data[1])
# build instance record
self.gt = [
SSNInstance(
int(x[1]), int(x[2]), frame_count, label=int(x[0]), best_iou=1.0
)
for x in self._data[2]
if int(x[2]) > int(x[1])
]
self.gt = list([x for x in self.gt if x.start_frame < frame_count])
self.proposals = [
SSNInstance(
int(x[3]),
int(x[4]),
frame_count,
label=int(x[0]),
best_iou=float(x[1]),
overlap_self=float(x[2]),
)
for x in self._data[3]
if int(x[4]) > int(x[3])
]
self.proposals = list(
[x for x in self.proposals if x.start_frame < frame_count]
)
@property
def id(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
def get_fg(self, fg_thresh, with_gt=True):
fg = [p for p in self.proposals if p.best_iou > fg_thresh]
if with_gt:
fg.extend(self.gt)
for x in fg:
x.compute_regression_targets(self.gt, fg_thresh)
return fg
def get_negatives(
self,
incomplete_iou_thresh,
bg_iou_thresh,
bg_coverage_thresh=0.01,
incomplete_overlap_thresh=0.7,
):
tag = [0] * len(self.proposals)
incomplete_props = []
background_props = []
for i in range(len(tag)):
if (
self.proposals[i].best_iou < incomplete_iou_thresh
and self.proposals[i].overlap_self > incomplete_overlap_thresh
):
tag[i] = 1 # incomplete
incomplete_props.append(self.proposals[i])
for i in range(len(tag)):
if (
tag[i] == 0
and self.proposals[i].best_iou < bg_iou_thresh
and self.proposals[i].coverage > bg_coverage_thresh
):
background_props.append(self.proposals[i])
return incomplete_props, background_props
class SSNDataSet(data.Dataset):
def __init__(
self,
root_path,
prop_file=None,
body_seg=5,
aug_seg=2,
video_centric=True,
new_length=1,
modality="RGB",
image_tmpl="img_{:05d}.jpg",
transform=None,
random_shift=True,
test_mode=False,
prop_per_video=8,
fg_ratio=1,
bg_ratio=1,
incomplete_ratio=6,
fg_iou_thresh=0.7,
bg_iou_thresh=0.01,
incomplete_iou_thresh=0.3,
bg_coverage_thresh=0.02,
incomplete_overlap_thresh=0.7,
gt_as_fg=True,
reg_stats=None,
test_interval=6,
verbose=True,
exclude_empty=True,
epoch_multiplier=1,
):
self.root_path = root_path
self.prop_file = prop_file
self.verbose = verbose
self.body_seg = body_seg
self.aug_seg = aug_seg
self.video_centric = video_centric
self.exclude_empty = exclude_empty
self.epoch_multiplier = epoch_multiplier
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.test_interval = test_interval
self.fg_iou_thresh = fg_iou_thresh
self.incomplete_iou_thresh = incomplete_iou_thresh
self.bg_iou_thresh = bg_iou_thresh
self.bg_coverage_thresh = bg_coverage_thresh
self.incomplete_overlap_thresh = incomplete_overlap_thresh
self.starting_ratio = 0.5
self.ending_ratio = 0.5
self.gt_as_fg = gt_as_fg
denum = fg_ratio + bg_ratio + incomplete_ratio
self.fg_per_video = int(prop_per_video * (fg_ratio / denum))
self.bg_per_video = int(prop_per_video * (bg_ratio / denum))
self.incomplete_per_video = (
prop_per_video - self.fg_per_video - self.bg_per_video
)
self._parse_prop_file(stats=reg_stats)
def _load_image(self, directory, idx):
if self.modality == "RGB" or self.modality == "RGBDiff":
return [
Image.open(
os.path.join(directory, self.image_tmpl.format(idx))
).convert("RGB")
]
elif self.modality == "Flow":
x_img = Image.open(
os.path.join(directory, self.image_tmpl.format("x", idx))
).convert("L")
y_img = Image.open(
os.path.join(directory, self.image_tmpl.format("y", idx))
).convert("L")
return [x_img, y_img]
def _parse_prop_file(self, stats=None):
prop_info = load_proposal_file(self.prop_file)
self.video_list = [SSNVideoRecord(p) for p in prop_info]
if self.exclude_empty:
self.video_list = list([x for x in self.video_list if len(x.gt) > 0])
self.video_dict = {v.id: v for v in self.video_list}
# construct three pools:
# 1. Foreground
# 2. Background
# 3. Incomplete
self.fg_pool = []
self.bg_pool = []
self.incomp_pool = []
for v in self.video_list:
self.fg_pool.extend(
[(v.id, prop) for prop in v.get_fg(self.fg_iou_thresh, self.gt_as_fg)]
)
incomp, bg = v.get_negatives(
self.incomplete_iou_thresh,
self.bg_iou_thresh,
self.bg_coverage_thresh,
self.incomplete_overlap_thresh,
)
self.incomp_pool.extend([(v.id, prop) for prop in incomp])
self.bg_pool.extend([(v.id, prop) for prop in bg])
if stats is None:
self._compute_regresssion_stats()
else:
self.stats = stats
if self.verbose:
print(
(
"""
SSNDataset: Proposal file {prop_file} parsed.
There are {pnum} usable proposals from {vnum} videos.
{fnum} foreground proposals
{inum} incomplete_proposals
{bnum} background_proposals
Sampling config:
FG/BG/INC: {fr}/{br}/{ir}
Video Centric: {vc}
Epoch size multiplier: {em}
Regression Stats:
Location: mean {stats[0][0]:.05f} std {stats[1][0]:.05f}
Duration: mean {stats[0][1]:.05f} std {stats[1][1]:.05f}
""".format(
prop_file=self.prop_file,
pnum=len(self.fg_pool)
+ len(self.bg_pool)
+ len(self.incomp_pool),
fnum=len(self.fg_pool),
inum=len(self.incomp_pool),
bnum=len(self.bg_pool),
fr=self.fg_per_video,
br=self.bg_per_video,
ir=self.incomplete_per_video,
vnum=len(self.video_dict),
vc=self.video_centric,
stats=self.stats,
em=self.epoch_multiplier,
)
)
)
else:
print(
(
"""
SSNDataset: Proposal file {prop_file} parsed.
""".format(
prop_file=self.prop_file
)
)
)
def _video_centric_sampling(self, video):
fg = video.get_fg(self.fg_iou_thresh, self.gt_as_fg)
incomp, bg = video.get_negatives(
self.incomplete_iou_thresh,
self.bg_iou_thresh,
self.bg_coverage_thresh,
self.incomplete_overlap_thresh,
)
def sample_video_proposals(
proposal_type, video_id, video_pool, requested_num, dataset_pool
):
if len(video_pool) == 0:
# if there is nothing in the video pool, go fetch from the dataset pool
return [
(dataset_pool[x], proposal_type)
for x in np.random.choice(
len(dataset_pool), requested_num, replace=False
)
]
else:
replicate = len(video_pool) < requested_num
idx = np.random.choice(
len(video_pool), requested_num, replace=replicate
)
return [((video_id, video_pool[x]), proposal_type) for x in idx]
out_props = []
out_props.extend(
sample_video_proposals(0, video.id, fg, self.fg_per_video, self.fg_pool)
) # sample foreground
out_props.extend(
sample_video_proposals(
1, video.id, incomp, self.incomplete_per_video, self.incomp_pool
)
) # sample incomp.
out_props.extend(
sample_video_proposals(2, video.id, bg, self.bg_per_video, self.bg_pool)
) # sample background
return out_props
def _random_sampling(self):
out_props = []
out_props.extend(
[
(x, 0)
for x in np.random.choice(
self.fg_pool, self.fg_per_video, replace=False
)
]
)
out_props.extend(
[
(x, 1)
for x in np.random.choice(
self.incomp_pool, self.incomplete_per_video, replace=False
)
]
)
out_props.extend(
[
(x, 2)
for x in np.random.choice(
self.bg_pool, self.bg_per_video, replace=False
)
]
)
return out_props
def _sample_indices(self, valid_length, num_seg):
"""
:param record: VideoRecord
:return: list
"""
average_duration = (valid_length + 1) // num_seg
if average_duration > 0:
# normal cases
offsets = np.multiply(list(range(num_seg)), average_duration) + randint(
average_duration, size=num_seg
)
elif valid_length > num_seg:
offsets = np.sort(randint(valid_length, size=num_seg))
else:
offsets = np.zeros((num_seg,))
return offsets
def _get_val_indices(self, valid_length, num_seg):
if valid_length > num_seg:
tick = valid_length / float(num_seg)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(num_seg)])
else:
offsets = np.zeros((num_seg,))
return offsets
def _sample_ssn_indices(self, prop, frame_cnt):
start_frame = prop.start_frame + 1
end_frame = prop.end_frame
duration = end_frame - start_frame + 1
assert duration != 0, (prop.start_frame, prop.end_frame, prop.best_iou)
valid_length = duration - self.new_length
valid_starting = max(1, start_frame - int(duration * self.starting_ratio))
valid_ending = min(
frame_cnt - self.new_length + 1,
end_frame + int(duration * self.ending_ratio),
)
valid_starting_length = start_frame - valid_starting - self.new_length + 1
valid_ending_length = valid_ending - end_frame - self.new_length + 1
starting_scale = (valid_starting_length + self.new_length - 1) / (
duration * self.starting_ratio
)
ending_scale = (valid_ending_length + self.new_length - 1) / (
duration * self.ending_ratio
)
# get starting
starting_offsets = (
self._sample_indices(valid_starting_length, self.aug_seg)
if self.random_shift
else self._get_val_indices(valid_starting_length, self.aug_seg)
) + valid_starting
course_offsets = (
self._sample_indices(valid_length, self.body_seg)
if self.random_shift
else self._get_val_indices(valid_length, self.body_seg)
) + start_frame
ending_offsets = (
self._sample_indices(valid_ending_length, self.aug_seg)
if self.random_shift
else self._get_val_indices(valid_ending_length, self.aug_seg)
) + end_frame
offsets = np.concatenate((starting_offsets, course_offsets, ending_offsets))
stage_split = [
self.aug_seg,
self.aug_seg + self.body_seg,
self.aug_seg * 2 + self.body_seg,
]
return offsets, starting_scale, ending_scale, stage_split
def _load_prop_data(self, prop):
# read frame count
frame_cnt = self.video_dict[prop[0][0]].num_frames
# sample segment indices
prop_indices, starting_scale, ending_scale, stage_split = self._sample_ssn_indices(
prop[0][1], frame_cnt
)
# turn prop into standard format
# get label
if prop[1] == 0:
label = prop[0][1].label
elif prop[1] == 1:
label = prop[0][1].label # incomplete
elif prop[1] == 2:
label = 0 # background
else:
raise ValueError()
frames = []
for idx, seg_ind in enumerate(prop_indices):
p = int(seg_ind)
for x in range(self.new_length):
frames.extend(self._load_image(prop[0][0], min(frame_cnt, p + x)))
# get regression target
if prop[1] == 0:
reg_targets = prop[0][1].regression_targets
reg_targets = (
(reg_targets[0] - self.stats[0][0]) / self.stats[1][0],
(reg_targets[1] - self.stats[0][1]) / self.stats[1][1],
)
else:
reg_targets = (0.0, 0.0)
return (
frames,
label,
reg_targets,
starting_scale,
ending_scale,
stage_split,
prop[1],
)
def _compute_regresssion_stats(self):
if self.verbose:
print("computing regression target normalizing constants")
targets = []
for video in self.video_list:
fg = video.get_fg(self.fg_iou_thresh, False)
for p in fg:
targets.append(list(p.regression_targets))
self.stats = np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))
def get_test_data(self, video, test_interval, gen_batchsize=4):
props = video.proposals
video_id = video.id
frame_cnt = video.num_frames
frame_ticks = (
np.arange(0, frame_cnt - self.new_length, test_interval, dtype=np.int) + 1
)
num_sampled_frames = len(frame_ticks)
# avoid empty proposal list
if len(props) == 0:
props.append(SSNInstance(0, frame_cnt - 1, frame_cnt))
# process proposals to subsampled sequences
rel_prop_list = []
proposal_tick_list = []
scaling_list = []
for proposal in props:
rel_prop = proposal.start_frame / frame_cnt, proposal.end_frame / frame_cnt
rel_duration = rel_prop[1] - rel_prop[0]
rel_starting_duration = rel_duration * self.starting_ratio
rel_ending_duration = rel_duration * self.ending_ratio
rel_starting = rel_prop[0] - rel_starting_duration
rel_ending = rel_prop[1] + rel_ending_duration
real_rel_starting = max(0.0, rel_starting)
real_rel_ending = min(1.0, rel_ending)
starting_scaling = (rel_prop[0] - real_rel_starting) / rel_starting_duration
ending_scaling = (real_rel_ending - rel_prop[1]) / rel_ending_duration
proposal_ticks = (
int(real_rel_starting * num_sampled_frames),
int(rel_prop[0] * num_sampled_frames),
int(rel_prop[1] * num_sampled_frames),
int(real_rel_ending * num_sampled_frames),
)
rel_prop_list.append(rel_prop)
proposal_tick_list.append(proposal_ticks)
scaling_list.append((starting_scaling, ending_scaling))
# load frames
# Since there are many frames for each video during testing, instead of returning the read frames,
# we return a generator which gives the frames in small batches, this lower the memory burden
# and runtime overhead. Usually setting batchsize=4 would fit most cases.
def frame_gen(batchsize):
frames = []
cnt = 0
for idx, seg_ind in enumerate(frame_ticks):
p = int(seg_ind)
for x in range(self.new_length):
frames.extend(self._load_image(video_id, min(frame_cnt, p + x)))
cnt += 1
if cnt % batchsize == 0:
frames = self.transform(frames)
yield frames
frames = []
if len(frames):
frames = self.transform(frames)
yield frames
return (
frame_gen(gen_batchsize),
len(frame_ticks),
torch.from_numpy(np.array(rel_prop_list)),
torch.from_numpy(np.array(proposal_tick_list)),
torch.from_numpy(np.array(scaling_list)),
)
def get_training_data(self, index):
if self.video_centric:
video = self.video_list[index]
props = self._video_centric_sampling(video)
else:
props = self._random_sampling()
out_frames = []
out_prop_len = []
out_prop_scaling = []
out_prop_type = []
out_prop_labels = []
out_prop_reg_targets = []
out_stage_split = []
for idx, p in enumerate(props):
prop_frames, prop_label, reg_targets, starting_scale, ending_scale, stage_split, prop_type = self._load_prop_data(
p
)
processed_frames = self.transform(prop_frames)
out_frames.append(processed_frames)
out_prop_len.append(self.body_seg + 2 * self.aug_seg)
out_prop_scaling.append([starting_scale, ending_scale])
out_prop_labels.append(prop_label)
out_prop_reg_targets.append(reg_targets)
out_prop_type.append(prop_type)
out_stage_split.append(stage_split)
out_prop_len = torch.from_numpy(np.array(out_prop_len))
out_prop_scaling = torch.from_numpy(
np.array(out_prop_scaling, dtype=np.float32)
)
out_prop_labels = torch.from_numpy(np.array(out_prop_labels))
out_prop_reg_targets = torch.from_numpy(
np.array(out_prop_reg_targets, dtype=np.float32)
)
out_prop_type = torch.from_numpy(np.array(out_prop_type))
out_stage_split = torch.from_numpy(np.array(out_stage_split))
out_frames = torch.cat(out_frames)
return (
out_frames,
out_prop_len,
out_prop_scaling,
out_prop_type,
out_prop_labels,
out_prop_reg_targets,
out_stage_split,
)
def get_all_gt(self):
gt_list = []
for video in self.video_list:
vid = video.id
gt_list.extend(
[
[
vid,
x.label - 1,
x.start_frame / video.num_frames,
x.end_frame / video.num_frames,
]
for x in video.gt
]
)
return gt_list
def __getitem__(self, index):
real_index = index % len(self.video_list)
if self.test_mode:
return self.get_test_data(self.video_list[real_index], self.test_interval)
else:
return self.get_training_data(real_index)
def __len__(self):
return len(self.video_list) * self.epoch_multiplier
| 32.330484
| 126
| 0.554195
|
import torch.utils.data as data
import os
import os.path
from numpy.random import randint
from ops.io import load_proposal_file
from transforms import *
from ops.utils import temporal_iou
class SSNInstance:
def __init__(
self,
start_frame,
end_frame,
video_frame_count,
fps=1,
label=None,
best_iou=None,
overlap_self=None,
):
self.start_frame = start_frame
self.end_frame = min(end_frame, video_frame_count)
self._label = label
self.fps = fps
self.coverage = (end_frame - start_frame) / video_frame_count
self.best_iou = best_iou
self.overlap_self = overlap_self
self.loc_reg = None
self.size_reg = None
def compute_regression_targets(self, gt_list, fg_thresh):
if self.best_iou < fg_thresh:
return
ious = [
temporal_iou(
(self.start_frame, self.end_frame), (gt.start_frame, gt.end_frame)
)
for gt in gt_list
]
best_gt_id = np.argmax(ious)
best_gt = gt_list[best_gt_id]
prop_center = (self.start_frame + self.end_frame) / 2
gt_center = (best_gt.start_frame + best_gt.end_frame) / 2
prop_size = self.end_frame - self.start_frame + 1
gt_size = best_gt.end_frame - best_gt.start_frame + 1
self.loc_reg = (gt_center - prop_center) / prop_size
try:
self.size_reg = math.log(gt_size / prop_size)
except:
print((gt_size, prop_size, self.start_frame, self.end_frame))
raise
@property
def start_time(self):
return self.start_frame / self.fps
@property
def end_time(self):
return self.end_frame / self.fps
@property
def label(self):
return self._label if self._label is not None else -1
@property
def regression_targets(self):
return [self.loc_reg, self.size_reg] if self.loc_reg is not None else [0, 0]
class SSNVideoRecord:
def __init__(self, prop_record):
self._data = prop_record
frame_count = int(self._data[1])
self.gt = [
SSNInstance(
int(x[1]), int(x[2]), frame_count, label=int(x[0]), best_iou=1.0
)
for x in self._data[2]
if int(x[2]) > int(x[1])
]
self.gt = list([x for x in self.gt if x.start_frame < frame_count])
self.proposals = [
SSNInstance(
int(x[3]),
int(x[4]),
frame_count,
label=int(x[0]),
best_iou=float(x[1]),
overlap_self=float(x[2]),
)
for x in self._data[3]
if int(x[4]) > int(x[3])
]
self.proposals = list(
[x for x in self.proposals if x.start_frame < frame_count]
)
@property
def id(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
def get_fg(self, fg_thresh, with_gt=True):
fg = [p for p in self.proposals if p.best_iou > fg_thresh]
if with_gt:
fg.extend(self.gt)
for x in fg:
x.compute_regression_targets(self.gt, fg_thresh)
return fg
def get_negatives(
self,
incomplete_iou_thresh,
bg_iou_thresh,
bg_coverage_thresh=0.01,
incomplete_overlap_thresh=0.7,
):
tag = [0] * len(self.proposals)
incomplete_props = []
background_props = []
for i in range(len(tag)):
if (
self.proposals[i].best_iou < incomplete_iou_thresh
and self.proposals[i].overlap_self > incomplete_overlap_thresh
):
tag[i] = 1 incomplete_props.append(self.proposals[i])
for i in range(len(tag)):
if (
tag[i] == 0
and self.proposals[i].best_iou < bg_iou_thresh
and self.proposals[i].coverage > bg_coverage_thresh
):
background_props.append(self.proposals[i])
return incomplete_props, background_props
class SSNDataSet(data.Dataset):
def __init__(
self,
root_path,
prop_file=None,
body_seg=5,
aug_seg=2,
video_centric=True,
new_length=1,
modality="RGB",
image_tmpl="img_{:05d}.jpg",
transform=None,
random_shift=True,
test_mode=False,
prop_per_video=8,
fg_ratio=1,
bg_ratio=1,
incomplete_ratio=6,
fg_iou_thresh=0.7,
bg_iou_thresh=0.01,
incomplete_iou_thresh=0.3,
bg_coverage_thresh=0.02,
incomplete_overlap_thresh=0.7,
gt_as_fg=True,
reg_stats=None,
test_interval=6,
verbose=True,
exclude_empty=True,
epoch_multiplier=1,
):
self.root_path = root_path
self.prop_file = prop_file
self.verbose = verbose
self.body_seg = body_seg
self.aug_seg = aug_seg
self.video_centric = video_centric
self.exclude_empty = exclude_empty
self.epoch_multiplier = epoch_multiplier
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.test_interval = test_interval
self.fg_iou_thresh = fg_iou_thresh
self.incomplete_iou_thresh = incomplete_iou_thresh
self.bg_iou_thresh = bg_iou_thresh
self.bg_coverage_thresh = bg_coverage_thresh
self.incomplete_overlap_thresh = incomplete_overlap_thresh
self.starting_ratio = 0.5
self.ending_ratio = 0.5
self.gt_as_fg = gt_as_fg
denum = fg_ratio + bg_ratio + incomplete_ratio
self.fg_per_video = int(prop_per_video * (fg_ratio / denum))
self.bg_per_video = int(prop_per_video * (bg_ratio / denum))
self.incomplete_per_video = (
prop_per_video - self.fg_per_video - self.bg_per_video
)
self._parse_prop_file(stats=reg_stats)
def _load_image(self, directory, idx):
if self.modality == "RGB" or self.modality == "RGBDiff":
return [
Image.open(
os.path.join(directory, self.image_tmpl.format(idx))
).convert("RGB")
]
elif self.modality == "Flow":
x_img = Image.open(
os.path.join(directory, self.image_tmpl.format("x", idx))
).convert("L")
y_img = Image.open(
os.path.join(directory, self.image_tmpl.format("y", idx))
).convert("L")
return [x_img, y_img]
def _parse_prop_file(self, stats=None):
prop_info = load_proposal_file(self.prop_file)
self.video_list = [SSNVideoRecord(p) for p in prop_info]
if self.exclude_empty:
self.video_list = list([x for x in self.video_list if len(x.gt) > 0])
self.video_dict = {v.id: v for v in self.video_list}
self.fg_pool = []
self.bg_pool = []
self.incomp_pool = []
for v in self.video_list:
self.fg_pool.extend(
[(v.id, prop) for prop in v.get_fg(self.fg_iou_thresh, self.gt_as_fg)]
)
incomp, bg = v.get_negatives(
self.incomplete_iou_thresh,
self.bg_iou_thresh,
self.bg_coverage_thresh,
self.incomplete_overlap_thresh,
)
self.incomp_pool.extend([(v.id, prop) for prop in incomp])
self.bg_pool.extend([(v.id, prop) for prop in bg])
if stats is None:
self._compute_regresssion_stats()
else:
self.stats = stats
if self.verbose:
print(
(
"""
SSNDataset: Proposal file {prop_file} parsed.
There are {pnum} usable proposals from {vnum} videos.
{fnum} foreground proposals
{inum} incomplete_proposals
{bnum} background_proposals
Sampling config:
FG/BG/INC: {fr}/{br}/{ir}
Video Centric: {vc}
Epoch size multiplier: {em}
Regression Stats:
Location: mean {stats[0][0]:.05f} std {stats[1][0]:.05f}
Duration: mean {stats[0][1]:.05f} std {stats[1][1]:.05f}
""".format(
prop_file=self.prop_file,
pnum=len(self.fg_pool)
+ len(self.bg_pool)
+ len(self.incomp_pool),
fnum=len(self.fg_pool),
inum=len(self.incomp_pool),
bnum=len(self.bg_pool),
fr=self.fg_per_video,
br=self.bg_per_video,
ir=self.incomplete_per_video,
vnum=len(self.video_dict),
vc=self.video_centric,
stats=self.stats,
em=self.epoch_multiplier,
)
)
)
else:
print(
(
"""
SSNDataset: Proposal file {prop_file} parsed.
""".format(
prop_file=self.prop_file
)
)
)
def _video_centric_sampling(self, video):
fg = video.get_fg(self.fg_iou_thresh, self.gt_as_fg)
incomp, bg = video.get_negatives(
self.incomplete_iou_thresh,
self.bg_iou_thresh,
self.bg_coverage_thresh,
self.incomplete_overlap_thresh,
)
def sample_video_proposals(
proposal_type, video_id, video_pool, requested_num, dataset_pool
):
if len(video_pool) == 0:
return [
(dataset_pool[x], proposal_type)
for x in np.random.choice(
len(dataset_pool), requested_num, replace=False
)
]
else:
replicate = len(video_pool) < requested_num
idx = np.random.choice(
len(video_pool), requested_num, replace=replicate
)
return [((video_id, video_pool[x]), proposal_type) for x in idx]
out_props = []
out_props.extend(
sample_video_proposals(0, video.id, fg, self.fg_per_video, self.fg_pool)
) out_props.extend(
sample_video_proposals(
1, video.id, incomp, self.incomplete_per_video, self.incomp_pool
)
) out_props.extend(
sample_video_proposals(2, video.id, bg, self.bg_per_video, self.bg_pool)
)
return out_props
def _random_sampling(self):
out_props = []
out_props.extend(
[
(x, 0)
for x in np.random.choice(
self.fg_pool, self.fg_per_video, replace=False
)
]
)
out_props.extend(
[
(x, 1)
for x in np.random.choice(
self.incomp_pool, self.incomplete_per_video, replace=False
)
]
)
out_props.extend(
[
(x, 2)
for x in np.random.choice(
self.bg_pool, self.bg_per_video, replace=False
)
]
)
return out_props
def _sample_indices(self, valid_length, num_seg):
average_duration = (valid_length + 1) // num_seg
if average_duration > 0:
offsets = np.multiply(list(range(num_seg)), average_duration) + randint(
average_duration, size=num_seg
)
elif valid_length > num_seg:
offsets = np.sort(randint(valid_length, size=num_seg))
else:
offsets = np.zeros((num_seg,))
return offsets
def _get_val_indices(self, valid_length, num_seg):
if valid_length > num_seg:
tick = valid_length / float(num_seg)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(num_seg)])
else:
offsets = np.zeros((num_seg,))
return offsets
def _sample_ssn_indices(self, prop, frame_cnt):
start_frame = prop.start_frame + 1
end_frame = prop.end_frame
duration = end_frame - start_frame + 1
assert duration != 0, (prop.start_frame, prop.end_frame, prop.best_iou)
valid_length = duration - self.new_length
valid_starting = max(1, start_frame - int(duration * self.starting_ratio))
valid_ending = min(
frame_cnt - self.new_length + 1,
end_frame + int(duration * self.ending_ratio),
)
valid_starting_length = start_frame - valid_starting - self.new_length + 1
valid_ending_length = valid_ending - end_frame - self.new_length + 1
starting_scale = (valid_starting_length + self.new_length - 1) / (
duration * self.starting_ratio
)
ending_scale = (valid_ending_length + self.new_length - 1) / (
duration * self.ending_ratio
)
starting_offsets = (
self._sample_indices(valid_starting_length, self.aug_seg)
if self.random_shift
else self._get_val_indices(valid_starting_length, self.aug_seg)
) + valid_starting
course_offsets = (
self._sample_indices(valid_length, self.body_seg)
if self.random_shift
else self._get_val_indices(valid_length, self.body_seg)
) + start_frame
ending_offsets = (
self._sample_indices(valid_ending_length, self.aug_seg)
if self.random_shift
else self._get_val_indices(valid_ending_length, self.aug_seg)
) + end_frame
offsets = np.concatenate((starting_offsets, course_offsets, ending_offsets))
stage_split = [
self.aug_seg,
self.aug_seg + self.body_seg,
self.aug_seg * 2 + self.body_seg,
]
return offsets, starting_scale, ending_scale, stage_split
def _load_prop_data(self, prop):
frame_cnt = self.video_dict[prop[0][0]].num_frames
prop_indices, starting_scale, ending_scale, stage_split = self._sample_ssn_indices(
prop[0][1], frame_cnt
)
if prop[1] == 0:
label = prop[0][1].label
elif prop[1] == 1:
label = prop[0][1].label elif prop[1] == 2:
label = 0 else:
raise ValueError()
frames = []
for idx, seg_ind in enumerate(prop_indices):
p = int(seg_ind)
for x in range(self.new_length):
frames.extend(self._load_image(prop[0][0], min(frame_cnt, p + x)))
if prop[1] == 0:
reg_targets = prop[0][1].regression_targets
reg_targets = (
(reg_targets[0] - self.stats[0][0]) / self.stats[1][0],
(reg_targets[1] - self.stats[0][1]) / self.stats[1][1],
)
else:
reg_targets = (0.0, 0.0)
return (
frames,
label,
reg_targets,
starting_scale,
ending_scale,
stage_split,
prop[1],
)
def _compute_regresssion_stats(self):
if self.verbose:
print("computing regression target normalizing constants")
targets = []
for video in self.video_list:
fg = video.get_fg(self.fg_iou_thresh, False)
for p in fg:
targets.append(list(p.regression_targets))
self.stats = np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))
def get_test_data(self, video, test_interval, gen_batchsize=4):
props = video.proposals
video_id = video.id
frame_cnt = video.num_frames
frame_ticks = (
np.arange(0, frame_cnt - self.new_length, test_interval, dtype=np.int) + 1
)
num_sampled_frames = len(frame_ticks)
if len(props) == 0:
props.append(SSNInstance(0, frame_cnt - 1, frame_cnt))
rel_prop_list = []
proposal_tick_list = []
scaling_list = []
for proposal in props:
rel_prop = proposal.start_frame / frame_cnt, proposal.end_frame / frame_cnt
rel_duration = rel_prop[1] - rel_prop[0]
rel_starting_duration = rel_duration * self.starting_ratio
rel_ending_duration = rel_duration * self.ending_ratio
rel_starting = rel_prop[0] - rel_starting_duration
rel_ending = rel_prop[1] + rel_ending_duration
real_rel_starting = max(0.0, rel_starting)
real_rel_ending = min(1.0, rel_ending)
starting_scaling = (rel_prop[0] - real_rel_starting) / rel_starting_duration
ending_scaling = (real_rel_ending - rel_prop[1]) / rel_ending_duration
proposal_ticks = (
int(real_rel_starting * num_sampled_frames),
int(rel_prop[0] * num_sampled_frames),
int(rel_prop[1] * num_sampled_frames),
int(real_rel_ending * num_sampled_frames),
)
rel_prop_list.append(rel_prop)
proposal_tick_list.append(proposal_ticks)
scaling_list.append((starting_scaling, ending_scaling))
def frame_gen(batchsize):
frames = []
cnt = 0
for idx, seg_ind in enumerate(frame_ticks):
p = int(seg_ind)
for x in range(self.new_length):
frames.extend(self._load_image(video_id, min(frame_cnt, p + x)))
cnt += 1
if cnt % batchsize == 0:
frames = self.transform(frames)
yield frames
frames = []
if len(frames):
frames = self.transform(frames)
yield frames
return (
frame_gen(gen_batchsize),
len(frame_ticks),
torch.from_numpy(np.array(rel_prop_list)),
torch.from_numpy(np.array(proposal_tick_list)),
torch.from_numpy(np.array(scaling_list)),
)
def get_training_data(self, index):
if self.video_centric:
video = self.video_list[index]
props = self._video_centric_sampling(video)
else:
props = self._random_sampling()
out_frames = []
out_prop_len = []
out_prop_scaling = []
out_prop_type = []
out_prop_labels = []
out_prop_reg_targets = []
out_stage_split = []
for idx, p in enumerate(props):
prop_frames, prop_label, reg_targets, starting_scale, ending_scale, stage_split, prop_type = self._load_prop_data(
p
)
processed_frames = self.transform(prop_frames)
out_frames.append(processed_frames)
out_prop_len.append(self.body_seg + 2 * self.aug_seg)
out_prop_scaling.append([starting_scale, ending_scale])
out_prop_labels.append(prop_label)
out_prop_reg_targets.append(reg_targets)
out_prop_type.append(prop_type)
out_stage_split.append(stage_split)
out_prop_len = torch.from_numpy(np.array(out_prop_len))
out_prop_scaling = torch.from_numpy(
np.array(out_prop_scaling, dtype=np.float32)
)
out_prop_labels = torch.from_numpy(np.array(out_prop_labels))
out_prop_reg_targets = torch.from_numpy(
np.array(out_prop_reg_targets, dtype=np.float32)
)
out_prop_type = torch.from_numpy(np.array(out_prop_type))
out_stage_split = torch.from_numpy(np.array(out_stage_split))
out_frames = torch.cat(out_frames)
return (
out_frames,
out_prop_len,
out_prop_scaling,
out_prop_type,
out_prop_labels,
out_prop_reg_targets,
out_stage_split,
)
def get_all_gt(self):
gt_list = []
for video in self.video_list:
vid = video.id
gt_list.extend(
[
[
vid,
x.label - 1,
x.start_frame / video.num_frames,
x.end_frame / video.num_frames,
]
for x in video.gt
]
)
return gt_list
def __getitem__(self, index):
real_index = index % len(self.video_list)
if self.test_mode:
return self.get_test_data(self.video_list[real_index], self.test_interval)
else:
return self.get_training_data(real_index)
def __len__(self):
return len(self.video_list) * self.epoch_multiplier
| true
| true
|
f704520d1a228703aaf40ee1af453d7651947d38
| 45
|
py
|
Python
|
routes/websocket/__init__.py
|
ceyzaguirre4/starlette-mvc
|
03d0f38e11669e988a084e84b890ecdcca449f64
|
[
"MIT"
] | 8
|
2019-06-19T15:32:47.000Z
|
2021-02-01T19:57:26.000Z
|
routes/websocket/__init__.py
|
ceyzaguirre4/starlette-mvc
|
03d0f38e11669e988a084e84b890ecdcca449f64
|
[
"MIT"
] | null | null | null |
routes/websocket/__init__.py
|
ceyzaguirre4/starlette-mvc
|
03d0f38e11669e988a084e84b890ecdcca449f64
|
[
"MIT"
] | 2
|
2019-07-31T22:23:56.000Z
|
2021-02-01T19:57:29.000Z
|
from .routes import app as websockets_routes
| 22.5
| 44
| 0.844444
|
from .routes import app as websockets_routes
| true
| true
|
f7045338c41d6965d06ef3953f92771273c53481
| 786
|
py
|
Python
|
server/models/utils.py
|
Justinyu1618/Coronalert
|
df7d66bec147ea1f47105102582bc25469e4bee2
|
[
"MIT"
] | 2
|
2020-04-19T07:08:39.000Z
|
2020-06-01T21:22:07.000Z
|
server/models/utils.py
|
HackCameroon/Coronalert
|
df7d66bec147ea1f47105102582bc25469e4bee2
|
[
"MIT"
] | 3
|
2020-10-13T01:06:56.000Z
|
2022-02-27T01:51:31.000Z
|
server/models/utils.py
|
HackCameroon/Coronalert
|
df7d66bec147ea1f47105102582bc25469e4bee2
|
[
"MIT"
] | 1
|
2020-05-08T08:37:15.000Z
|
2020-05-08T08:37:15.000Z
|
import json
from server import db
from sqlalchemy.ext import mutable
class JsonEncodedDict(db.TypeDecorator):
    """SQLAlchemy column type that stores a dict as JSON in a text column.

    ``None`` round-trips as an empty dict/empty JSON object rather than
    SQL NULL semantics.
    """

    impl = db.Text

    def process_bind_param(self, value, dialect):
        # Python -> DB: serialize; a missing value becomes an empty JSON object.
        return '{}' if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        # DB -> Python: deserialize; NULL columns surface as an empty dict.
        return {} if value is None else json.loads(value)
# Track in-place mutations of stored dicts so SQLAlchemy flushes changes
# made via item assignment, not just full reassignment.
mutable.MutableDict.associate_with(JsonEncodedDict)
# Association (many-to-many) table linking users to locations.
user_location_table = db.Table('user_location_table',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), nullable=False),
db.Column('location_id',db.Integer, db.ForeignKey('location.id'), nullable=False),
)
| 32.75
| 110
| 0.604326
|
import json
from server import db
from sqlalchemy.ext import mutable
class JsonEncodedDict(db.TypeDecorator):
impl = db.Text
def process_bind_param(self, value, dialect):
if value is None:
return '{}'
else:
return json.dumps(value)
def process_result_value(self, value, dialect):
if value is None:
return {}
else:
return json.loads(value)
mutable.MutableDict.associate_with(JsonEncodedDict)
user_location_table = db.Table('user_location_table',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), nullable=False),
db.Column('location_id',db.Integer, db.ForeignKey('location.id'), nullable=False),
)
| true
| true
|
f704533cb05012bfc523241ab664a84ebc5b8dad
| 7,054
|
py
|
Python
|
obsolete/reports/pipeline_capseq/trackers/macs_replicated_intervals.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 11
|
2018-09-07T11:33:23.000Z
|
2022-01-07T12:16:11.000Z
|
obsolete/reports/pipeline_capseq/trackers/macs_replicated_intervals.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 102
|
2018-03-22T15:35:26.000Z
|
2022-03-23T17:46:16.000Z
|
obsolete/reports/pipeline_capseq/trackers/macs_replicated_intervals.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 7
|
2018-06-11T15:01:41.000Z
|
2020-03-31T09:29:33.000Z
|
import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import scipy.stats
import numpy.ma
import Stats
import Histogram
from cgatReport.Tracker import *
from cpgReport import *
##########################################################################
class replicatedIntervalSummary(cpgTracker):
    """Summary statistics for intervals called by the peak finder."""

    mPattern = "_replicated_intervals$"

    def __call__(self, track, slice=None):
        # A single row of aggregates over this track's interval table.
        statement = (
            "SELECT COUNT(*) as Intervals, round(AVG(length),0) as Mean_length, round(AVG(nprobes),0) as Mean_reads FROM %(track)s_replicated_intervals" % locals())
        return self.getRow(statement)
##########################################################################
class replicatedIntervalLengths(cpgTracker):
    """Distribution of interval lengths."""

    mPattern = "_replicated_intervals$"

    def __call__(self, track, slice=None):
        statement = "SELECT length FROM %(track)s_replicated_intervals" % locals()
        return self.getAll(statement)
##########################################################################
class replicatedIntervalPeakValues(cpgTracker):
    """Distribution of maximum interval coverage (read count at the peak)."""

    mPattern = "_replicated_intervals$"

    def __call__(self, track, slice=None):
        statement = "SELECT peakval FROM %(track)s_replicated_intervals" % locals()
        return self.getAll(statement)
##########################################################################
class replicatedIntervalAverageValues(cpgTracker):
    """Distribution of average coverage (mean read count within each interval)."""

    mPattern = "_replicated_intervals$"

    def __call__(self, track, slice=None):
        statement = "SELECT avgval FROM %(track)s_replicated_intervals" % locals()
        return self.getAll(statement)
##########################################################################
class replicatedIntervalFoldChange(cpgTracker):
    """Fold-change values for all intervals."""

    mPattern = "_replicated_intervals$"

    def __call__(self, track, slice=None):
        statement = "SELECT fold FROM %(track)s_replicated_intervals" % locals()
        return self.getAll(statement)
##########################################################################
##########################################################################
##########################################################################
class replicatedIntervalPeakLocation(cpgTracker):
    """Relative peak position within each interval, centred on the midpoint."""

    mPattern = "_replicated_intervals$"

    def __call__(self, track, slice=None):
        # Measured from both ends so a symmetric peak yields mirrored values.
        from_start = self.getValues(
            "SELECT (PeakCenter - start) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
        from_end = self.getValues(
            "SELECT (end - PeakCenter) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
        return {"distance": from_start + from_end}
##########################################################################
class replicatedIntervalPeakDistance(cpgTracker):
    """Absolute distance (bases) from the peak centre to each interval boundary."""

    mPattern = "_replicated_intervals$"

    def __call__(self, track, slice=None):
        to_start = self.getValues(
            "SELECT PeakCenter - start FROM %(track)s_replicated_intervals" % locals())
        to_end = self.getValues(
            "SELECT end - PeakCenter FROM %(track)s_replicated_intervals" % locals())
        return {"distance": to_start + to_end}
##########################################################################
##########################################################################
##########################################################################
class replicatedIntervalCpGDensity(cpgTracker):
    """CpG density of intervals vs. control and flanking regions."""

    pattern = "(.*)_replicated_composition"

    def __call__(self, track, slice=None):
        # Same column pulled from the interval table and its three comparators.
        queries = [
            "SELECT pCpG FROM %(track)s_replicated_composition" % locals(),
            "SELECT pCpG FROM %(track)s_replicated_composition_control" % locals(),
            "SELECT pCpG FROM %(track)s_replicated_composition_flanking5" % locals(),
            "SELECT pCpG FROM %(track)s_replicated_composition_flanking3" % locals(),
        ]
        labels = ("CAPseq composition", "Control composition",
                  "5` Flank Composition", "3` Flank Composition")
        values = [self.getValues(q) for q in queries]
        return odict(list(zip(labels, values)))
##########################################################################
class replicatedIntervalCpGObsExp1(cpgTracker):
    """CpG observed/expected ratio (method 1) vs. control and flanking regions."""

    pattern = "(.*)_replicated_composition"

    def __call__(self, track, slice=None):
        queries = [
            "SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition" % locals(),
            "SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_control" % locals(),
            "SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking5" % locals(),
            "SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking3" % locals(),
        ]
        labels = ("CAPseq composition", "Control composition",
                  "5` Flank Composition", "3` Flank Composition")
        values = [self.getValues(q) for q in queries]
        return odict(list(zip(labels, values)))
##########################################################################
class replicatedIntervalCpGObsExp2(cpgTracker):
    """CpG observed/expected ratio (method 2) vs. control and flanking regions."""

    pattern = "(.*)_replicated_composition"

    def __call__(self, track, slice=None):
        queries = [
            "SELECT CpG_ObsExp FROM %(track)s_replicated_composition" % locals(),
            "SELECT CpG_ObsExp FROM %(track)s_replicated_composition_control" % locals(),
            "SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking5" % locals(),
            "SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking3" % locals(),
        ]
        labels = ("CAPseq composition", "Control composition",
                  "5` Flank Composition", "3` Flank Composition")
        values = [self.getValues(q) for q in queries]
        return odict(list(zip(labels, values)))
##########################################################################
class replicatedIntervalGCContent(cpgTracker):
    """GC content of intervals vs. control and flanking regions."""

    pattern = "(.*)_replicated_composition"

    def __call__(self, track, slice=None):
        queries = [
            "SELECT pGC FROM %(track)s_replicated_composition" % locals(),
            "SELECT pGC FROM %(track)s_replicated_composition_control" % locals(),
            "SELECT pGC FROM %(track)s_replicated_composition_flanking5" % locals(),
            "SELECT pGC FROM %(track)s_replicated_composition_flanking3" % locals(),
        ]
        labels = ("CAPseq composition", "Control composition",
                  "5` Flank Composition", "3` Flank Composition")
        values = [self.getValues(q) for q in queries]
        return odict(list(zip(labels, values)))
| 38.546448
| 164
| 0.568897
|
import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import scipy.stats
import numpy.ma
import Stats
import Histogram
from cgatReport.Tracker import *
from cpgReport import *
class replicatedIntervalSummary(cpgTracker):
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getRow(
"SELECT COUNT(*) as Intervals, round(AVG(length),0) as Mean_length, round(AVG(nprobes),0) as Mean_reads FROM %(track)s_replicated_intervals" % locals())
return data
class replicatedIntervalLengths(cpgTracker):
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT length FROM %(track)s_replicated_intervals" % locals())
return data
class replicatedIntervalPeakValues(cpgTracker):
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT peakval FROM %(track)s_replicated_intervals" % locals())
return data
class replicatedIntervalAverageValues(cpgTracker):
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT avgval FROM %(track)s_replicated_intervals" % locals())
return data
class replicatedIntervalFoldChange(cpgTracker):
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT fold FROM %(track)s_replicated_intervals" % locals())
return data
class replicatedIntervalPeakLocation(cpgTracker):
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT (PeakCenter - start) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
data2 = self.getValues(
"SELECT (end - PeakCenter) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
return {"distance": data1 + data2}
class replicatedIntervalPeakDistance(cpgTracker):
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT PeakCenter - start FROM %(track)s_replicated_intervals" % locals())
data2 = self.getValues(
"SELECT end - PeakCenter FROM %(track)s_replicated_intervals" % locals())
return {"distance": data1 + data2}
class replicatedIntervalCpGDensity(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
class replicatedIntervalCpGObsExp1(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
class replicatedIntervalCpGObsExp2(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
class replicatedIntervalGCContent(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
| true
| true
|
f7045379712a0cda1d66cb3115fa1f0870d8720e
| 689
|
py
|
Python
|
library/migrations/0002_auto_20180704_0002.py
|
doriclazar/peak_30
|
a87217e4d0d1f96d39ad214d40a879c7abfaaaee
|
[
"Apache-2.0"
] | null | null | null |
library/migrations/0002_auto_20180704_0002.py
|
doriclazar/peak_30
|
a87217e4d0d1f96d39ad214d40a879c7abfaaaee
|
[
"Apache-2.0"
] | 1
|
2018-07-14T07:35:55.000Z
|
2018-07-16T07:40:49.000Z
|
library/migrations/0002_auto_20180704_0002.py
|
doriclazar/peak_30
|
a87217e4d0d1f96d39ad214d40a879c7abfaaaee
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-07-04 00:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace ``Module.professions`` with a single ``Module.profession`` FK.

    NOTE(review): ``professions`` is presumably the old plural/many field —
    confirm against 0001_initial before assuming no data needs migrating.
    """
    dependencies = [
        ('library', '0001_initial'),
    ]
    operations = [
        # Order matters: drop the old field before adding its replacement.
        migrations.RemoveField(
            model_name='module',
            name='professions',
        ),
        # default=1 backfills existing rows to Profession pk=1;
        # preserve_default=False removes that default from the model afterwards.
        migrations.AddField(
            model_name='module',
            name='profession',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='library.Profession'),
            preserve_default=False,
        ),
    ]
| 25.518519
| 120
| 0.619739
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('library', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='module',
name='professions',
),
migrations.AddField(
model_name='module',
name='profession',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='library.Profession'),
preserve_default=False,
),
]
| true
| true
|
f704561340c1d7a365b883b6ae1bea0dbbbbec2d
| 333
|
py
|
Python
|
test_settings.py
|
pwilczynskiclearcode/django-nuit
|
e1b619c00db36fba48683e9cf3d51cf4460f99c8
|
[
"Apache-2.0"
] | 5
|
2016-05-15T12:43:24.000Z
|
2018-10-06T07:45:38.000Z
|
test_settings.py
|
pwilczynskiclearcode/django-nuit
|
e1b619c00db36fba48683e9cf3d51cf4460f99c8
|
[
"Apache-2.0"
] | 12
|
2016-04-21T22:01:55.000Z
|
2017-04-20T09:27:56.000Z
|
test_settings.py
|
pwilczynskiclearcode/django-nuit
|
e1b619c00db36fba48683e9cf3d51cf4460f99c8
|
[
"Apache-2.0"
] | 6
|
2016-04-21T23:27:48.000Z
|
2018-02-22T16:24:11.000Z
|
# Minimal Django settings for running the test suite.
DATABASES = {
    'default': {
        # SQLite backend; no NAME given — NOTE(review): presumably the test
        # runner supplies/creates the database file, confirm if tests fail.
        'ENGINE': 'django.db.backends.sqlite3',
    },
}
ROOT_URLCONF = 'django_autoconfig.autourlconf'
INSTALLED_APPS = [
    'django.contrib.auth',
    'nuit',
]
STATIC_URL = '/static/'
STATIC_ROOT = '.static'
# django-autoconfig derives the remaining settings from INSTALLED_APPS;
# it must run after the explicit settings above are defined.
from django_autoconfig.autoconfig import configure_settings
configure_settings(globals())
| 22.2
| 59
| 0.6997
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
ROOT_URLCONF = 'django_autoconfig.autourlconf'
INSTALLED_APPS = [
'django.contrib.auth',
'nuit',
]
STATIC_URL = '/static/'
STATIC_ROOT = '.static'
from django_autoconfig.autoconfig import configure_settings
configure_settings(globals())
| true
| true
|
f70456c2fe01d36dc70a451996e2eedc3ab16d0d
| 3,307
|
py
|
Python
|
src/my_blog/settings.py
|
zainab66/blog-django-ar
|
5e2643f40afb11f648841fd2192a459f6141505b
|
[
"bzip2-1.0.6"
] | 1
|
2020-02-16T02:52:25.000Z
|
2020-02-16T02:52:25.000Z
|
src/my_blog/settings.py
|
zainab66/blog-django-ar
|
5e2643f40afb11f648841fd2192a459f6141505b
|
[
"bzip2-1.0.6"
] | 2
|
2021-03-18T23:50:25.000Z
|
2021-09-22T18:35:25.000Z
|
src/my_blog/settings.py
|
zainab66/blog-django-ar
|
5e2643f40afb11f648841fd2192a459f6141505b
|
[
"bzip2-1.0.6"
] | null | null | null |
"""
Django settings for my_blog project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@7+q1q@_=iniipvuc%nfs)5qauaax2g0cnc1fxzos52t-9ml=m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'sarah.1024z@gmail.com'
EMAIL_HOST_PASSWORD = 'rzan2015'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 26.03937
| 91
| 0.702449
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '@7+q1q@_=iniipvuc%nfs)5qauaax2g0cnc1fxzos52t-9ml=m'
DEBUG = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'sarah.1024z@gmail.com'
EMAIL_HOST_PASSWORD = 'rzan2015'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| true
| true
|
f70456d0ba561c2a6d0de8e56a180302aea268a7
| 4,949
|
py
|
Python
|
samcli/commands/package/package_context.py
|
kylelaker/aws-sam-cli
|
d2917102ef56ac05b9973f96c716612f9638bb62
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
samcli/commands/package/package_context.py
|
kylelaker/aws-sam-cli
|
d2917102ef56ac05b9973f96c716612f9638bb62
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
samcli/commands/package/package_context.py
|
kylelaker/aws-sam-cli
|
d2917102ef56ac05b9973f96c716612f9638bb62
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
"""
Logic for uploading to s3 based on supplied template file and s3 bucket
"""
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import logging
import os
import boto3
import click
import docker
from botocore.config import Config
from samcli.commands.package.exceptions import PackageFailedError
from samcli.lib.package.artifact_exporter import Template
from samcli.lib.package.ecr_uploader import ECRUploader
from samcli.lib.package.code_signer import CodeSigner
from samcli.lib.package.s3_uploader import S3Uploader
from samcli.lib.utils.botoconfig import get_boto_config_with_user_agent
from samcli.yamlhelper import yaml_dump
LOG = logging.getLogger(__name__)
class PackageContext:
    """Orchestrates `sam package`: uploads local artifacts (code to S3,
    container images to ECR), optionally signs code, and writes out a
    template whose references point at the uploaded artifacts.

    Usable as a context manager, although ``__enter__``/``__exit__`` are
    currently no-ops.
    """

    # User-facing hint printed after a successful standalone package run.
    MSG_PACKAGED_TEMPLATE_WRITTEN = (
        "\nSuccessfully packaged artifacts and wrote output template "
        "to file {output_file_name}."
        "\n"
        "Execute the following command to deploy the packaged template"
        "\n"
        "sam deploy --template-file {output_file_path} "
        "--stack-name <YOUR STACK NAME>"
        "\n"
    )

    def __init__(
        self,
        template_file,
        s3_bucket,
        image_repository,
        s3_prefix,
        kms_key_id,
        output_template_file,
        use_json,
        force_upload,
        no_progressbar,
        metadata,
        region,
        profile,
        on_deploy=False,
        signing_profiles=None,
    ):
        # All construction is cheap attribute storage; the AWS/docker
        # clients and uploaders are created lazily in run().
        self.template_file = template_file
        self.s3_bucket = s3_bucket
        self.image_repository = image_repository
        self.s3_prefix = s3_prefix
        self.kms_key_id = kms_key_id
        self.output_template_file = output_template_file
        self.use_json = use_json
        self.force_upload = force_upload
        self.no_progressbar = no_progressbar
        self.metadata = metadata
        self.region = region
        # NOTE(review): `profile` is stored but not referenced elsewhere in
        # this class — presumably consumed by callers; confirm before removal.
        self.profile = profile
        # True when invoked as part of `sam deploy` (suppresses the
        # "execute sam deploy" hint after writing the template).
        self.on_deploy = on_deploy
        self.s3_uploader = None
        self.code_signer = None
        self.signing_profiles = signing_profiles
        self.ecr_uploader = None
        self.uploader = {}

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass

    def run(self):
        """Build the uploaders, export the template, and write the result.

        Raises:
            PackageFailedError: when writing the exported template fails
                with an OSError.
        """
        region_name = self.region if self.region else None

        # s3v4 signing is required for KMS-encrypted uploads.
        s3_client = boto3.client(
            "s3",
            config=get_boto_config_with_user_agent(signature_version="s3v4", region_name=region_name),
        )
        ecr_client = boto3.client("ecr", config=get_boto_config_with_user_agent(region_name=region_name))

        docker_client = docker.from_env()

        self.s3_uploader = S3Uploader(
            s3_client, self.s3_bucket, self.s3_prefix, self.kms_key_id, self.force_upload, self.no_progressbar
        )
        # attach the given metadata to the artifacts to be uploaded
        self.s3_uploader.artifact_metadata = self.metadata
        self.ecr_uploader = ECRUploader(docker_client, ecr_client, self.image_repository)

        code_signer_client = boto3.client("signer")
        self.code_signer = CodeSigner(code_signer_client, self.signing_profiles)

        # NOTE(srirammv): move this to its own class.
        # Keyed by destination kind; artifact_exporter picks the right one
        # per resource.
        self.uploader = {"s3": self.s3_uploader, "ecr": self.ecr_uploader}

        try:
            exported_str = self._export(self.template_file, self.use_json)

            self.write_output(self.output_template_file, exported_str)

            if self.output_template_file and not self.on_deploy:
                msg = self.MSG_PACKAGED_TEMPLATE_WRITTEN.format(
                    output_file_name=self.output_template_file,
                    output_file_path=os.path.abspath(self.output_template_file),
                )
                click.echo(msg)
        except OSError as ex:
            raise PackageFailedError(template_file=self.template_file, ex=str(ex)) from ex

    def _export(self, template_path, use_json):
        """Export the template (uploading artifacts as a side effect) and
        return it serialized as JSON or YAML per *use_json*."""
        template = Template(template_path, os.getcwd(), self.uploader, self.code_signer)
        exported_template = template.export()

        if use_json:
            exported_str = json.dumps(exported_template, indent=4, ensure_ascii=False)
        else:
            exported_str = yaml_dump(exported_template)

        return exported_str

    def write_output(self, output_file_name, data):
        """Write *data* to *output_file_name*, or echo to stdout when the
        file name is None."""
        if output_file_name is None:
            click.echo(data)
            return

        with open(output_file_name, "w") as fp:
            fp.write(data)
| 33.214765
| 110
| 0.6765
|
import json
import logging
import os
import boto3
import click
import docker
from botocore.config import Config
from samcli.commands.package.exceptions import PackageFailedError
from samcli.lib.package.artifact_exporter import Template
from samcli.lib.package.ecr_uploader import ECRUploader
from samcli.lib.package.code_signer import CodeSigner
from samcli.lib.package.s3_uploader import S3Uploader
from samcli.lib.utils.botoconfig import get_boto_config_with_user_agent
from samcli.yamlhelper import yaml_dump
LOG = logging.getLogger(__name__)
class PackageContext:
MSG_PACKAGED_TEMPLATE_WRITTEN = (
"\nSuccessfully packaged artifacts and wrote output template "
"to file {output_file_name}."
"\n"
"Execute the following command to deploy the packaged template"
"\n"
"sam deploy --template-file {output_file_path} "
"--stack-name <YOUR STACK NAME>"
"\n"
)
def __init__(
self,
template_file,
s3_bucket,
image_repository,
s3_prefix,
kms_key_id,
output_template_file,
use_json,
force_upload,
no_progressbar,
metadata,
region,
profile,
on_deploy=False,
signing_profiles=None,
):
self.template_file = template_file
self.s3_bucket = s3_bucket
self.image_repository = image_repository
self.s3_prefix = s3_prefix
self.kms_key_id = kms_key_id
self.output_template_file = output_template_file
self.use_json = use_json
self.force_upload = force_upload
self.no_progressbar = no_progressbar
self.metadata = metadata
self.region = region
self.profile = profile
self.on_deploy = on_deploy
self.s3_uploader = None
self.code_signer = None
self.signing_profiles = signing_profiles
self.ecr_uploader = None
self.uploader = {}
def __enter__(self):
return self
def __exit__(self, *args):
pass
def run(self):
region_name = self.region if self.region else None
s3_client = boto3.client(
"s3",
config=get_boto_config_with_user_agent(signature_version="s3v4", region_name=region_name),
)
ecr_client = boto3.client("ecr", config=get_boto_config_with_user_agent(region_name=region_name))
docker_client = docker.from_env()
self.s3_uploader = S3Uploader(
s3_client, self.s3_bucket, self.s3_prefix, self.kms_key_id, self.force_upload, self.no_progressbar
)
self.s3_uploader.artifact_metadata = self.metadata
self.ecr_uploader = ECRUploader(docker_client, ecr_client, self.image_repository)
code_signer_client = boto3.client("signer")
self.code_signer = CodeSigner(code_signer_client, self.signing_profiles)
self.uploader = {"s3": self.s3_uploader, "ecr": self.ecr_uploader}
try:
exported_str = self._export(self.template_file, self.use_json)
self.write_output(self.output_template_file, exported_str)
if self.output_template_file and not self.on_deploy:
msg = self.MSG_PACKAGED_TEMPLATE_WRITTEN.format(
output_file_name=self.output_template_file,
output_file_path=os.path.abspath(self.output_template_file),
)
click.echo(msg)
except OSError as ex:
raise PackageFailedError(template_file=self.template_file, ex=str(ex)) from ex
def _export(self, template_path, use_json):
template = Template(template_path, os.getcwd(), self.uploader, self.code_signer)
exported_template = template.export()
if use_json:
exported_str = json.dumps(exported_template, indent=4, ensure_ascii=False)
else:
exported_str = yaml_dump(exported_template)
return exported_str
def write_output(self, output_file_name, data):
if output_file_name is None:
click.echo(data)
return
with open(output_file_name, "w") as fp:
fp.write(data)
| true
| true
|
f7045706a79e9f09f2b7bf296887bce361af2fb5
| 1,658
|
py
|
Python
|
src/model/vdsr.py
|
delldu/EDSR
|
98752b57a3091e693c523e710380d369f9913041
|
[
"MIT"
] | 1
|
2019-10-19T13:28:30.000Z
|
2019-10-19T13:28:30.000Z
|
src/model/vdsr.py
|
delldu/EDSR
|
98752b57a3091e693c523e710380d369f9913041
|
[
"MIT"
] | null | null | null |
src/model/vdsr.py
|
delldu/EDSR
|
98752b57a3091e693c523e710380d369f9913041
|
[
"MIT"
] | null | null | null |
from model import common
import torch.nn as nn
import torch.nn.init as init
# Pre-trained checkpoint URLs keyed by configuration name
# ('r{n_resblocks}f{n_feats}'); an empty string means no download available.
url = {
    'r20f64': ''
}
def make_model(args, parent=False):
    """Framework factory hook: build a VDSR model from the parsed *args*."""
    model = VDSR(args)
    return model
class VDSR(nn.Module):
    """VDSR super-resolution network: a deep stack of conv+ReLU blocks with
    a global residual connection, so the body only learns the detail to add."""

    def __init__(self, args, conv=common.default_conv):
        super(VDSR, self).__init__()

        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        # Look up a pre-trained checkpoint for this configuration, if any.
        url_name = 'r{}f{}'.format(n_resblocks, n_feats)
        self.url = url.get(url_name)

        self.sub_mean = common.MeanShift(args.rgb_range)
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)

        def basic_block(in_channels, out_channels, act):
            # conv (+ optional activation), no batch norm, with bias.
            return common.BasicBlock(
                conv, in_channels, out_channels, kernel_size,
                bias=True, bn=False, act=act
            )

        # head -> (n_resblocks - 2) hidden blocks -> tail back to image space
        layers = [basic_block(args.n_colors, n_feats, nn.ReLU(True))]
        layers.extend(
            basic_block(n_feats, n_feats, nn.ReLU(True))
            for _ in range(n_resblocks - 2)
        )
        layers.append(basic_block(n_feats, args.n_colors, None))
        self.body = nn.Sequential(*layers)

    def forward(self, x):
        centered = self.sub_mean(x)
        residual = self.body(centered)
        # Global residual is taken against the mean-shifted input.
        residual += centered
        return self.add_mean(residual)
# cd ..(src), export PYTHONPATH=`pwd`
# if __name__ == '__main__':
# import torch
# import utility
# from option import args
# torch.manual_seed(args.seed)
# checkpoint = utility.checkpoint(args)
# print(args)
# model = VDSR(args)
# print(model)
| 25.507692
| 73
| 0.598311
|
from model import common
import torch.nn as nn
import torch.nn.init as init
url = {
'r20f64': ''
}
def make_model(args, parent=False):
return VDSR(args)
class VDSR(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(VDSR, self).__init__()
n_resblocks = args.n_resblocks
n_feats = args.n_feats
kernel_size = 3
url_name = 'r{}f{}'.format(n_resblocks, n_feats)
if url_name in url:
self.url = url[url_name]
else:
self.url = None
self.sub_mean = common.MeanShift(args.rgb_range)
self.add_mean = common.MeanShift(args.rgb_range, sign=1)
def basic_block(in_channels, out_channels, act):
return common.BasicBlock(
conv, in_channels, out_channels, kernel_size,
bias=True, bn=False, act=act
)
m_body = []
m_body.append(basic_block(args.n_colors, n_feats, nn.ReLU(True)))
for _ in range(n_resblocks - 2):
m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True)))
m_body.append(basic_block(n_feats, args.n_colors, None))
self.body = nn.Sequential(*m_body)
def forward(self, x):
x = self.sub_mean(x)
res = self.body(x)
res += x
x = self.add_mean(res)
return x
| true
| true
|
f704591e6a08033243efa0eee051057ba7a55fd2
| 9,454
|
py
|
Python
|
src/zeit/content/image/transform.py
|
ZeitOnline/zeit.content.image
|
0ea8d125f8ff7a2a4d8333542cded9856e25805a
|
[
"BSD-3-Clause"
] | null | null | null |
src/zeit/content/image/transform.py
|
ZeitOnline/zeit.content.image
|
0ea8d125f8ff7a2a4d8333542cded9856e25805a
|
[
"BSD-3-Clause"
] | 11
|
2016-02-25T15:22:34.000Z
|
2019-02-26T12:20:59.000Z
|
src/zeit/content/image/transform.py
|
ZeitOnline/zeit.content.image
|
0ea8d125f8ff7a2a4d8333542cded9856e25805a
|
[
"BSD-3-Clause"
] | 3
|
2015-07-28T11:11:56.000Z
|
2016-11-15T13:23:57.000Z
|
import PIL.Image
import PIL.ImageColor
import PIL.ImageEnhance
import zeit.cms.repository.folder
import zeit.connector.interfaces
import zeit.content.image.interfaces
import zope.app.appsetup.product
import zope.component
import zope.interface
import zope.security.proxy
class ImageTransform(object):
    """Adapter that derives scaled/cropped/enhanced variants from an IImage
    using PIL.
    """
    zope.interface.implements(zeit.content.image.interfaces.ITransform)
    zope.component.adapts(zeit.content.image.interfaces.IImage)
    # Hard upper bound (pixels) for either output dimension when a target
    # size is requested.
    MAXIMUM_IMAGE_SIZE = 5000
    def __init__(self, context):
        self.context = context
        try:
            self.image = PIL.Image.open(
                zope.security.proxy.removeSecurityProxy(context.open()))
            self.image.load()
        except IOError:
            # PIL raises IOError for unreadable or unsupported image data.
            raise zeit.content.image.interfaces.ImageProcessingError(
                "Cannot transform image %s" % context.__name__)
    def thumbnail(self, width, height, filter=PIL.Image.ANTIALIAS):
        """Return a copy scaled down to fit within (width, height)."""
        image = self.image.copy()
        image.thumbnail((width, height), filter)
        return self._construct_image(image)
    def resize(self, width=None, height=None, filter=PIL.Image.ANTIALIAS):
        """Resize to the given size; a missing dimension keeps the aspect ratio."""
        if width is None and height is None:
            raise TypeError('Need at least one of width and height.')
        orig_width, orig_height = self.image.size
        if width is None:
            width = orig_width * height / orig_height
        elif height is None:
            height = orig_height * width / orig_width
        image = self.image.resize((width, height), filter)
        return self._construct_image(image)
    def create_variant_image(
            self, variant, size=None, fill_color=None, format=None):
        """Create variant image from source image.
        Will crop the image according to the zoom, focus point and size. In
        addition, the image is scaled down to size (if given) and image
        enhancements, like brightness, are applied.
        The default variant skips cropping, but still applies image
        enhancements, so it can be used as a high quality preview of image
        enhancements in the frontend.
        """
        if not variant.is_default:
            image = self._crop_variant_image(variant, size=size)
        else:
            # Alpha channel is usually activated when cropping,
            # so we must do it by hand since we skipped cropping
            image = self._enable_alpha_channel(self.image)
        # Apply enhancements like brightness
        if variant.brightness is not None:
            image = PIL.ImageEnhance.Brightness(image).enhance(
                variant.brightness)
        if variant.contrast is not None:
            image = PIL.ImageEnhance.Contrast(image).enhance(
                variant.contrast)
        if variant.saturation is not None:
            image = PIL.ImageEnhance.Color(image).enhance(
                variant.saturation)
        if variant.sharpness is not None:
            image = PIL.ImageEnhance.Sharpness(image).enhance(
                variant.sharpness)
        # Optionally fill the background of transparent images
        if fill_color is not None and self._color_mode == 'RGBA':
            fill_color = PIL.ImageColor.getrgb('#' + fill_color)
            opaque = PIL.Image.new('RGB', image.size, fill_color)
            # Paste with the image itself as mask so transparent areas
            # show the fill color.
            opaque.paste(image, (0, 0), image)
            image = opaque
        return self._construct_image(image, format)
    def _crop_variant_image(self, variant, size=None):
        """Crop variant image from source image.
        Determines crop position using zoom, focus point and size constraint.
        The result image will have the exact dimensions that are predefined by
        the size argument, if provided. Otherwise it depends on the variant
        ratio and zoom only, giving back the best image quality, i.e. will not
        scale down.
        """
        source_width, source_height = self.image.size
        if (source_width == 0 or source_height == 0):
            # Degenerate source; nothing sensible to crop.
            return self.image
        zoomed_width = source_width
        zoomed_height = source_height
        if variant.zoom > 0:
            zoomed_width = int(source_width * variant.zoom)
            zoomed_height = int(source_height * variant.zoom)
        target_ratio = variant.ratio
        if target_ratio is None:
            # No ratio configured: keep the source's own aspect ratio.
            target_ratio = float(source_width) / float(source_height)
        target_width, target_height = self._fit_ratio_to_image(
            zoomed_width, zoomed_height, target_ratio)
        if size:
            # An explicit size overrides the variant ratio.
            w, h = size
            override_ratio = float(w) / float(h)
            target_width, target_height = self._fit_ratio_to_image(
                target_width, target_height, override_ratio)
        x, y = self._determine_crop_position(
            variant, target_width, target_height)
        image = self._crop(
            self.image, x, y, x + target_width, y + target_height)
        if size:
            # Scale to the requested size, clamped to MAXIMUM_IMAGE_SIZE.
            w, h = size
            if w > self.MAXIMUM_IMAGE_SIZE:
                w = self.MAXIMUM_IMAGE_SIZE
            if h > self.MAXIMUM_IMAGE_SIZE:
                h = self.MAXIMUM_IMAGE_SIZE
            image = image.resize((w, h), PIL.Image.ANTIALIAS)
        return image
    def _fit_ratio_to_image(self, source_width, source_height, target_ratio):
        """Calculate the biggest (width, height) inside the source that adheres
        to target ratio"""
        original_ratio = float(source_width) / float(source_height)
        if target_ratio > original_ratio:
            width = source_width
            height = int(source_width / target_ratio)
        else:
            width = int(source_height * target_ratio)
            height = source_height
        return width, height
    def _determine_crop_position(self, variant, target_width, target_height):
        # Top-left crop corner chosen so the focus point keeps the same
        # relative position inside the crop as it has in the source.
        width, height = self.image.size
        x = int(width * variant.focus_x - target_width * variant.focus_x)
        y = int(height * variant.focus_y - target_height * variant.focus_y)
        return x, y
    def _crop(self, pil_image, x1, y1, x2, y2):
        # Crop and normalize the color mode in one step.
        pil_image = pil_image.crop((x1, y1, x2, y2))
        pil_image = self._enable_alpha_channel(pil_image)
        return pil_image
    @property
    def _color_mode(self):
        # XXX This is a rather crude heuristic.
        return 'RGBA' if self.context.format == 'PNG' else 'RGB'
    def _enable_alpha_channel(self, pil_image):
        """Enable alpha channel for PNG images by converting to RGBA."""
        if pil_image.mode != self._color_mode:
            pil_image = pil_image.convert(self._color_mode)
        return pil_image
    def _construct_image(self, pil_image, format=None):
        # Persist *pil_image* into a TemporaryImage, picking per-format
        # encoder options and propagating the source's modification time.
        image = zeit.content.image.image.TemporaryImage()
        if not format:
            format = self.context.format
            image.mimeType = self.context.mimeType
        else:
            image.mimeType = 'image/' + format.lower()  # XXX crude heuristic.
        # XXX Maybe encoder setting should be made configurable.
        if format in ('JPG', 'JPEG'):
            options = {'progressive': True, 'quality': 85, 'optimize': True}
        elif format == 'PNG':
            options = {'optimize': True}
        elif format == 'WEBP':
            options = {'quality': 85}
        else:
            options = {}
        pil_image.save(image.open('w'), format, **options)
        image.__parent__ = self.context
        image_times = zope.dublincore.interfaces.IDCTimes(self.context, None)
        if image_times and image_times.modified:
            thumb_times = zope.dublincore.interfaces.IDCTimes(image)
            thumb_times.modified = image_times.modified
        return image
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IPersistentThumbnail)
def persistent_thumbnail_factory(context):
    """Return the stored thumbnail for *context*, creating it on first access.

    Thumbnail method and dimensions come from the 'zeit.content.image'
    product configuration (defaults: method 'thumbnail', 50x50).
    """
    config = zope.app.appsetup.product.getProductConfiguration(
        'zeit.content.image') or {}
    method_name = config.get('thumbnail-method', 'thumbnail')
    # Falsy configured values ('' / 0) mean "unconstrained" for a dimension.
    width = config.get('thumbnail-width', 50)
    if width:
        width = int(width)
    else:
        width = None
    height = config.get('thumbnail-height', 50)
    if height:
        height = int(height)
    else:
        height = None
    thumbnail_container = zeit.content.image.interfaces.IThumbnailFolder(
        context)
    image_name = context.__name__
    if image_name not in thumbnail_container:
        # Create the thumbnail via the configured transform method and copy
        # over the source image's non-WebDAV properties.
        transform = zeit.content.image.interfaces.ITransform(context)
        method = getattr(transform, method_name)
        thumbnail = method(width, height)
        thumbnail_properties = (
            zeit.connector.interfaces.IWebDAVWriteProperties(thumbnail))
        image_properties = zeit.connector.interfaces.IWebDAVReadProperties(
            context)
        for (name, namespace), value in image_properties.items():
            if namespace != 'DAV:':
                thumbnail_properties[(name, namespace)] = value
        # The thumbnail must get its own UUID, not the source image's.
        thumbnail_properties.pop(zeit.connector.interfaces.UUID_PROPERTY, None)
        thumbnail_container[image_name] = thumbnail
    return thumbnail_container[image_name]
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IThumbnailFolder)
def thumbnail_folder_factory(context):
    """Return (creating on demand) the 'thumbnails' folder next to *context*."""
    parent = context.__parent__
    folder_name = u'thumbnails'
    if folder_name not in parent:
        parent[folder_name] = zeit.cms.repository.folder.Folder()
    return parent[folder_name]
| 38.90535
| 79
| 0.649884
|
import PIL.Image
import PIL.ImageColor
import PIL.ImageEnhance
import zeit.cms.repository.folder
import zeit.connector.interfaces
import zeit.content.image.interfaces
import zope.app.appsetup.product
import zope.component
import zope.interface
import zope.security.proxy
class ImageTransform(object):
    """Adapter that derives scaled/cropped/enhanced variants from an IImage
    using PIL.
    """
    zope.interface.implements(zeit.content.image.interfaces.ITransform)
    zope.component.adapts(zeit.content.image.interfaces.IImage)
    # Hard upper bound (pixels) for either output dimension when a target
    # size is requested.
    MAXIMUM_IMAGE_SIZE = 5000

    def __init__(self, context):
        self.context = context
        try:
            self.image = PIL.Image.open(
                zope.security.proxy.removeSecurityProxy(context.open()))
            self.image.load()
        except IOError:
            # PIL raises IOError for unreadable or unsupported image data.
            raise zeit.content.image.interfaces.ImageProcessingError(
                "Cannot transform image %s" % context.__name__)

    def thumbnail(self, width, height, filter=PIL.Image.ANTIALIAS):
        """Return a copy scaled down to fit within (width, height)."""
        image = self.image.copy()
        image.thumbnail((width, height), filter)
        return self._construct_image(image)

    def resize(self, width=None, height=None, filter=PIL.Image.ANTIALIAS):
        """Resize to the given size; a missing dimension keeps the aspect ratio."""
        if width is None and height is None:
            raise TypeError('Need at least one of width and height.')
        orig_width, orig_height = self.image.size
        if width is None:
            width = orig_width * height / orig_height
        elif height is None:
            height = orig_height * width / orig_width
        image = self.image.resize((width, height), filter)
        return self._construct_image(image)

    def create_variant_image(
            self, variant, size=None, fill_color=None, format=None):
        """Crop per zoom/focus/size for *variant*, then apply enhancements.

        The default variant skips cropping but still gets enhancements.
        """
        if not variant.is_default:
            image = self._crop_variant_image(variant, size=size)
        else:
            # Cropping normally enables the alpha channel; do it by hand
            # here since the default variant skips cropping.
            image = self._enable_alpha_channel(self.image)
        if variant.brightness is not None:
            image = PIL.ImageEnhance.Brightness(image).enhance(
                variant.brightness)
        if variant.contrast is not None:
            image = PIL.ImageEnhance.Contrast(image).enhance(
                variant.contrast)
        if variant.saturation is not None:
            image = PIL.ImageEnhance.Color(image).enhance(
                variant.saturation)
        if variant.sharpness is not None:
            image = PIL.ImageEnhance.Sharpness(image).enhance(
                variant.sharpness)
        if fill_color is not None and self._color_mode == 'RGBA':
            # Flatten transparency onto an opaque background color.
            fill_color = PIL.ImageColor.getrgb('#' + fill_color)
            opaque = PIL.Image.new('RGB', image.size, fill_color)
            opaque.paste(image, (0, 0), image)
            image = opaque
        return self._construct_image(image, format)

    def _crop_variant_image(self, variant, size=None):
        """Crop using zoom, focus point and an optional exact target size."""
        source_width, source_height = self.image.size
        if (source_width == 0 or source_height == 0):
            # Degenerate source; nothing sensible to crop.
            return self.image
        zoomed_width = source_width
        zoomed_height = source_height
        if variant.zoom > 0:
            zoomed_width = int(source_width * variant.zoom)
            zoomed_height = int(source_height * variant.zoom)
        target_ratio = variant.ratio
        if target_ratio is None:
            # No ratio configured: keep the source's own aspect ratio.
            target_ratio = float(source_width) / float(source_height)
        target_width, target_height = self._fit_ratio_to_image(
            zoomed_width, zoomed_height, target_ratio)
        if size:
            # An explicit size overrides the variant ratio.
            w, h = size
            override_ratio = float(w) / float(h)
            target_width, target_height = self._fit_ratio_to_image(
                target_width, target_height, override_ratio)
        x, y = self._determine_crop_position(
            variant, target_width, target_height)
        image = self._crop(
            self.image, x, y, x + target_width, y + target_height)
        if size:
            # Scale to the requested size, clamped to MAXIMUM_IMAGE_SIZE.
            w, h = size
            if w > self.MAXIMUM_IMAGE_SIZE:
                w = self.MAXIMUM_IMAGE_SIZE
            if h > self.MAXIMUM_IMAGE_SIZE:
                h = self.MAXIMUM_IMAGE_SIZE
            image = image.resize((w, h), PIL.Image.ANTIALIAS)
        return image

    def _fit_ratio_to_image(self, source_width, source_height, target_ratio):
        """Biggest (width, height) inside the source honoring *target_ratio*."""
        original_ratio = float(source_width) / float(source_height)
        if target_ratio > original_ratio:
            width = source_width
            height = int(source_width / target_ratio)
        else:
            width = int(source_height * target_ratio)
            height = source_height
        return width, height

    def _determine_crop_position(self, variant, target_width, target_height):
        # Top-left crop corner chosen so the focus point keeps the same
        # relative position inside the crop as in the source.
        width, height = self.image.size
        x = int(width * variant.focus_x - target_width * variant.focus_x)
        y = int(height * variant.focus_y - target_height * variant.focus_y)
        return x, y

    def _crop(self, pil_image, x1, y1, x2, y2):
        # Crop and normalize the color mode in one step.
        pil_image = pil_image.crop((x1, y1, x2, y2))
        pil_image = self._enable_alpha_channel(pil_image)
        return pil_image

    @property
    def _color_mode(self):
        # XXX This is a rather crude heuristic.
        return 'RGBA' if self.context.format == 'PNG' else 'RGB'

    def _enable_alpha_channel(self, pil_image):
        """Convert to the target color mode (RGBA for PNG) if necessary."""
        if pil_image.mode != self._color_mode:
            pil_image = pil_image.convert(self._color_mode)
        return pil_image

    def _construct_image(self, pil_image, format=None):
        """Save *pil_image* into a TemporaryImage with per-format options.

        BUGFIX: the previous text fused two statements into the syntactically
        invalid ``image.mimeType = 'image/' + format.lower() if format in
        (...):`` — the assignment and the ``if`` are restored as separate
        statements.
        """
        image = zeit.content.image.image.TemporaryImage()
        if not format:
            format = self.context.format
            image.mimeType = self.context.mimeType
        else:
            image.mimeType = 'image/' + format.lower()  # XXX crude heuristic.
        if format in ('JPG', 'JPEG'):
            options = {'progressive': True, 'quality': 85, 'optimize': True}
        elif format == 'PNG':
            options = {'optimize': True}
        elif format == 'WEBP':
            options = {'quality': 85}
        else:
            options = {}
        pil_image.save(image.open('w'), format, **options)
        image.__parent__ = self.context
        image_times = zope.dublincore.interfaces.IDCTimes(self.context, None)
        if image_times and image_times.modified:
            thumb_times = zope.dublincore.interfaces.IDCTimes(image)
            thumb_times.modified = image_times.modified
        return image
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IPersistentThumbnail)
def persistent_thumbnail_factory(context):
config = zope.app.appsetup.product.getProductConfiguration(
'zeit.content.image') or {}
method_name = config.get('thumbnail-method', 'thumbnail')
width = config.get('thumbnail-width', 50)
if width:
width = int(width)
else:
width = None
height = config.get('thumbnail-height', 50)
if height:
height = int(height)
else:
height = None
thumbnail_container = zeit.content.image.interfaces.IThumbnailFolder(
context)
image_name = context.__name__
if image_name not in thumbnail_container:
transform = zeit.content.image.interfaces.ITransform(context)
method = getattr(transform, method_name)
thumbnail = method(width, height)
thumbnail_properties = (
zeit.connector.interfaces.IWebDAVWriteProperties(thumbnail))
image_properties = zeit.connector.interfaces.IWebDAVReadProperties(
context)
for (name, namespace), value in image_properties.items():
if namespace != 'DAV:':
thumbnail_properties[(name, namespace)] = value
thumbnail_properties.pop(zeit.connector.interfaces.UUID_PROPERTY, None)
thumbnail_container[image_name] = thumbnail
return thumbnail_container[image_name]
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IThumbnailFolder)
def thumbnail_folder_factory(context):
name = u'thumbnails'
folder = context.__parent__
if name not in folder:
folder[name] = zeit.cms.repository.folder.Folder()
return folder[name]
| true
| true
|
f70459c5cac1ef72f59035358cf6fe0cccf00ab0
| 3,418
|
py
|
Python
|
tests/unit/test_parameters/test_current_functions.py
|
NunoEdgarGFlowHub/PyBaMM
|
4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_parameters/test_current_functions.py
|
NunoEdgarGFlowHub/PyBaMM
|
4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_parameters/test_current_functions.py
|
NunoEdgarGFlowHub/PyBaMM
|
4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Tests for current input functions
#
import pybamm
import numbers
import unittest
import numpy as np
class TestCurrentFunctions(unittest.TestCase):
    """Unit tests for the current-input functions."""
    def test_constant_current(self):
        """A constant current should simplify to a pybamm.Scalar."""
        # test simplify
        current = pybamm.electrical_parameters.current_with_time
        parameter_values = pybamm.ParameterValues(
            {
                "Typical current [A]": 2,
                "Typical timescale [s]": 1,
                "Current function [A]": 2,
            }
        )
        processed_current = parameter_values.process_symbol(current)
        self.assertIsInstance(processed_current.simplify(), pybamm.Scalar)
    def test_get_current_data(self):
        """A data-backed current function should evaluate to arrays."""
        # test process parameters
        dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time
        parameter_values = pybamm.ParameterValues(
            {
                "Typical current [A]": 2,
                "Typical timescale [s]": 1,
                "Current function [A]": "[current data]car_current",
            }
        )
        dimensional_current_eval = parameter_values.process_symbol(dimensional_current)
        def current(t):
            return dimensional_current_eval.evaluate(t=t)
        standard_tests = StandardCurrentFunctionTests([current], always_array=True)
        standard_tests.test_all()
    def test_user_current(self):
        """A user-supplied sinusoidal current should round-trip exactly."""
        # create user-defined sin function
        def my_fun(t, A, omega):
            return A * pybamm.sin(2 * np.pi * omega * t)
        # choose amplitude and frequency
        A = pybamm.electrical_parameters.I_typ
        omega = pybamm.Parameter("omega")
        def current(t):
            return my_fun(t, A, omega)
        # set and process parameters
        parameter_values = pybamm.ParameterValues(
            {
                "Typical current [A]": 2,
                "Typical timescale [s]": 1,
                "omega": 3,
                "Current function [A]": current,
            }
        )
        dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time
        dimensional_current_eval = parameter_values.process_symbol(dimensional_current)
        def user_current(t):
            return dimensional_current_eval.evaluate(t=t)
        # check output types
        standard_tests = StandardCurrentFunctionTests([user_current])
        standard_tests.test_all()
        # check output correct value: amplitude 2, frequency omega=3
        time = np.linspace(0, 3600, 600)
        np.testing.assert_array_almost_equal(
            user_current(time), 2 * np.sin(2 * np.pi * 3 * time)
        )
class StandardCurrentFunctionTests(object):
def __init__(self, function_list, always_array=False):
self.function_list = function_list
self.always_array = always_array
def test_output_type(self):
for function in self.function_list:
if self.always_array is True:
assert isinstance(function(0), np.ndarray)
else:
assert isinstance(function(0), numbers.Number)
assert isinstance(function(np.zeros(3)), np.ndarray)
assert isinstance(function(np.zeros([3, 3])), np.ndarray)
def test_all(self):
self.test_output_type()
if __name__ == "__main__":
    # Allow running this test module directly; passing ``-v`` turns on
    # pybamm's debug mode for more verbose output.
    print("Add -v for more debug output")
    import sys
    if "-v" in sys.argv:
        debug = True
        pybamm.settings.debug_mode = True
    unittest.main()
| 32.245283
| 88
| 0.622001
|
import pybamm
import numbers
import unittest
import numpy as np
class TestCurrentFunctions(unittest.TestCase):
def test_constant_current(self):
current = pybamm.electrical_parameters.current_with_time
parameter_values = pybamm.ParameterValues(
{
"Typical current [A]": 2,
"Typical timescale [s]": 1,
"Current function [A]": 2,
}
)
processed_current = parameter_values.process_symbol(current)
self.assertIsInstance(processed_current.simplify(), pybamm.Scalar)
def test_get_current_data(self):
dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time
parameter_values = pybamm.ParameterValues(
{
"Typical current [A]": 2,
"Typical timescale [s]": 1,
"Current function [A]": "[current data]car_current",
}
)
dimensional_current_eval = parameter_values.process_symbol(dimensional_current)
def current(t):
return dimensional_current_eval.evaluate(t=t)
standard_tests = StandardCurrentFunctionTests([current], always_array=True)
standard_tests.test_all()
def test_user_current(self):
def my_fun(t, A, omega):
return A * pybamm.sin(2 * np.pi * omega * t)
A = pybamm.electrical_parameters.I_typ
omega = pybamm.Parameter("omega")
def current(t):
return my_fun(t, A, omega)
parameter_values = pybamm.ParameterValues(
{
"Typical current [A]": 2,
"Typical timescale [s]": 1,
"omega": 3,
"Current function [A]": current,
}
)
dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time
dimensional_current_eval = parameter_values.process_symbol(dimensional_current)
def user_current(t):
return dimensional_current_eval.evaluate(t=t)
standard_tests = StandardCurrentFunctionTests([user_current])
standard_tests.test_all()
time = np.linspace(0, 3600, 600)
np.testing.assert_array_almost_equal(
user_current(time), 2 * np.sin(2 * np.pi * 3 * time)
)
class StandardCurrentFunctionTests(object):
def __init__(self, function_list, always_array=False):
self.function_list = function_list
self.always_array = always_array
def test_output_type(self):
for function in self.function_list:
if self.always_array is True:
assert isinstance(function(0), np.ndarray)
else:
assert isinstance(function(0), numbers.Number)
assert isinstance(function(np.zeros(3)), np.ndarray)
assert isinstance(function(np.zeros([3, 3])), np.ndarray)
def test_all(self):
self.test_output_type()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| true
| true
|
f7045a7748f2d0754f675332513daaafa28aceaf
| 940
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/domainservice/models/SubDomainExist.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/domainservice/models/SubDomainExist.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/domainservice/models/SubDomainExist.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class SubDomainExist(object):
    """Existence-check result for a single sub-domain."""

    def __init__(self, domain=None, isExist=None):
        """
        :param domain: (Optional) the sub-domain name
        :param isExist: (Optional) existence state of the sub-domain;
            1: exists, 2: does not exist, 3: zone does not exist
        """
        self.domain = domain
        self.isExist = isExist
| 31.333333
| 75
| 0.711702
|
class SubDomainExist(object):
def __init__(self, domain=None, isExist=None):
self.domain = domain
self.isExist = isExist
| true
| true
|
f7045a8b76dd75929fb90ffcd1baf9e5c780d065
| 9,167
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20161201/get_public_ip_address.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20161201/get_public_ip_address.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20161201/get_public_ip_address.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetPublicIPAddressResult',
'AwaitableGetPublicIPAddressResult',
'get_public_ip_address',
]
@pulumi.output_type
class GetPublicIPAddressResult:
"""
Public IP address resource.
"""
def __init__(__self__, dns_settings=None, etag=None, id=None, idle_timeout_in_minutes=None, ip_address=None, ip_configuration=None, location=None, name=None, provisioning_state=None, public_ip_address_version=None, public_ip_allocation_method=None, resource_guid=None, tags=None, type=None):
if dns_settings and not isinstance(dns_settings, dict):
raise TypeError("Expected argument 'dns_settings' to be a dict")
pulumi.set(__self__, "dns_settings", dns_settings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes and not isinstance(idle_timeout_in_minutes, int):
raise TypeError("Expected argument 'idle_timeout_in_minutes' to be a int")
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
pulumi.set(__self__, "ip_address", ip_address)
if ip_configuration and not isinstance(ip_configuration, dict):
raise TypeError("Expected argument 'ip_configuration' to be a dict")
pulumi.set(__self__, "ip_configuration", ip_configuration)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address_version and not isinstance(public_ip_address_version, str):
raise TypeError("Expected argument 'public_ip_address_version' to be a str")
pulumi.set(__self__, "public_ip_address_version", public_ip_address_version)
if public_ip_allocation_method and not isinstance(public_ip_allocation_method, str):
raise TypeError("Expected argument 'public_ip_allocation_method' to be a str")
pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dnsSettings")
def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']:
"""
The FQDN of the DNS record associated with the public IP address.
"""
return pulumi.get(self, "dns_settings")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
The idle timeout of the public IP address.
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="ipConfiguration")
def ip_configuration(self) -> 'outputs.IPConfigurationResponse':
"""
IPConfiguration
"""
return pulumi.get(self, "ip_configuration")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddressVersion")
def public_ip_address_version(self) -> Optional[str]:
"""
The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
"""
return pulumi.get(self, "public_ip_address_version")
@property
@pulumi.getter(name="publicIPAllocationMethod")
def public_ip_allocation_method(self) -> Optional[str]:
"""
The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
"""
return pulumi.get(self, "public_ip_allocation_method")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the public IP resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetPublicIPAddressResult(GetPublicIPAddressResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPublicIPAddressResult(
dns_settings=self.dns_settings,
etag=self.etag,
id=self.id,
idle_timeout_in_minutes=self.idle_timeout_in_minutes,
ip_address=self.ip_address,
ip_configuration=self.ip_configuration,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
public_ip_address_version=self.public_ip_address_version,
public_ip_allocation_method=self.public_ip_allocation_method,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type)
def get_public_ip_address(expand: Optional[str] = None,
public_ip_address_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPublicIPAddressResult:
"""
Public IP address resource.
:param str expand: Expands referenced resources.
:param str public_ip_address_name: The name of the subnet.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['publicIpAddressName'] = public_ip_address_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20161201:getPublicIPAddress', __args__, opts=opts, typ=GetPublicIPAddressResult).value
return AwaitableGetPublicIPAddressResult(
dns_settings=__ret__.dns_settings,
etag=__ret__.etag,
id=__ret__.id,
idle_timeout_in_minutes=__ret__.idle_timeout_in_minutes,
ip_address=__ret__.ip_address,
ip_configuration=__ret__.ip_configuration,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
public_ip_address_version=__ret__.public_ip_address_version,
public_ip_allocation_method=__ret__.public_ip_allocation_method,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type)
| 38.84322
| 295
| 0.66423
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetPublicIPAddressResult',
'AwaitableGetPublicIPAddressResult',
'get_public_ip_address',
]
@pulumi.output_type
class GetPublicIPAddressResult:
    """
    Public IP address resource, as returned by the getPublicIPAddress invoke.
    """
    def __init__(__self__, dns_settings=None, etag=None, id=None, idle_timeout_in_minutes=None, ip_address=None, ip_configuration=None, location=None, name=None, provisioning_state=None, public_ip_address_version=None, public_ip_allocation_method=None, resource_guid=None, tags=None, type=None):
        # Each argument is type-checked when provided, then stored with
        # pulumi.set so the @pulumi.output_type getters below can expose it.
        if dns_settings and not isinstance(dns_settings, dict):
            raise TypeError("Expected argument 'dns_settings' to be a dict")
        pulumi.set(__self__, "dns_settings", dns_settings)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if idle_timeout_in_minutes and not isinstance(idle_timeout_in_minutes, int):
            raise TypeError("Expected argument 'idle_timeout_in_minutes' to be a int")
        pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
        if ip_address and not isinstance(ip_address, str):
            raise TypeError("Expected argument 'ip_address' to be a str")
        pulumi.set(__self__, "ip_address", ip_address)
        if ip_configuration and not isinstance(ip_configuration, dict):
            raise TypeError("Expected argument 'ip_configuration' to be a dict")
        pulumi.set(__self__, "ip_configuration", ip_configuration)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_ip_address_version and not isinstance(public_ip_address_version, str):
            raise TypeError("Expected argument 'public_ip_address_version' to be a str")
        pulumi.set(__self__, "public_ip_address_version", public_ip_address_version)
        if public_ip_allocation_method and not isinstance(public_ip_allocation_method, str):
            raise TypeError("Expected argument 'public_ip_allocation_method' to be a str")
        pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="dnsSettings")
    def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']:
        """DNS settings for the public IP address, if configured."""
        return pulumi.get(self, "dns_settings")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """Etag of the resource, if any."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID, if any."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[int]:
        """Idle timeout of the public IP address, in minutes."""
        return pulumi.get(self, "idle_timeout_in_minutes")
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """The assigned IP address, if any."""
        return pulumi.get(self, "ip_address")
    @property
    @pulumi.getter(name="ipConfiguration")
    def ip_configuration(self) -> 'outputs.IPConfigurationResponse':
        """The IP configuration associated with the public IP address."""
        return pulumi.get(self, "ip_configuration")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """Resource location, if any."""
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """Resource name."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Provisioning state of the resource, if any."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicIPAddressVersion")
    def public_ip_address_version(self) -> Optional[str]:
        """IP version of the public IP address, if any."""
        return pulumi.get(self, "public_ip_address_version")
    @property
    @pulumi.getter(name="publicIPAllocationMethod")
    def public_ip_allocation_method(self) -> Optional[str]:
        """Allocation method of the public IP address, if any."""
        return pulumi.get(self, "public_ip_allocation_method")
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """Resource GUID, if any."""
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """Resource tags, if any."""
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")
class AwaitableGetPublicIPAddressResult(GetPublicIPAddressResult):
    """Awaitable variant of GetPublicIPAddressResult so callers may ``await`` it."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `if False: yield` makes this method a generator, which is
        # what the await protocol requires; it never actually yields.
        if False:
            yield self
        # Resolve immediately to a plain result object carrying the same fields.
        return GetPublicIPAddressResult(
            dns_settings=self.dns_settings,
            etag=self.etag,
            id=self.id,
            idle_timeout_in_minutes=self.idle_timeout_in_minutes,
            ip_address=self.ip_address,
            ip_configuration=self.ip_configuration,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            public_ip_address_version=self.public_ip_address_version,
            public_ip_allocation_method=self.public_ip_allocation_method,
            resource_guid=self.resource_guid,
            tags=self.tags,
            type=self.type)
def get_public_ip_address(expand: Optional[str] = None,
                          public_ip_address_name: Optional[str] = None,
                          resource_group_name: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPublicIPAddressResult:
    """
    Look up an existing public IP address resource.
    :param str expand: Expands referenced resources.
    :param str public_ip_address_name: The name of the public IP address.
    :param str resource_group_name: The name of the resource group.
    :param pulumi.InvokeOptions opts: Options controlling how the invoke is performed.
    """
    # Build the wire-format argument dict expected by the provider (camelCase keys).
    __args__ = dict()
    __args__['expand'] = expand
    __args__['publicIpAddressName'] = public_ip_address_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20161201:getPublicIPAddress', __args__, opts=opts, typ=GetPublicIPAddressResult).value
    return AwaitableGetPublicIPAddressResult(
        dns_settings=__ret__.dns_settings,
        etag=__ret__.etag,
        id=__ret__.id,
        idle_timeout_in_minutes=__ret__.idle_timeout_in_minutes,
        ip_address=__ret__.ip_address,
        ip_configuration=__ret__.ip_configuration,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        public_ip_address_version=__ret__.public_ip_address_version,
        public_ip_allocation_method=__ret__.public_ip_allocation_method,
        resource_guid=__ret__.resource_guid,
        tags=__ret__.tags,
        type=__ret__.type)
| true
| true
|
f7045aca0807423bddc8738c277100b4972f3dc4
| 891
|
py
|
Python
|
examples/command/unix_ps.py
|
carr-elagheb/moler
|
b896ff668d9cc3704b6f806f7c2bf6e76c13427d
|
[
"BSD-3-Clause"
] | null | null | null |
examples/command/unix_ps.py
|
carr-elagheb/moler
|
b896ff668d9cc3704b6f806f7c2bf6e76c13427d
|
[
"BSD-3-Clause"
] | null | null | null |
examples/command/unix_ps.py
|
carr-elagheb/moler
|
b896ff668d9cc3704b6f806f7c2bf6e76c13427d
|
[
"BSD-3-Clause"
] | null | null | null |
from moler.cmd.unix.ps import Ps
from moler.observable_connection import ObservableConnection, get_connection
from moler.io.raw.terminal import ThreadedTerminal
# Example: run `ps -ef` over a local threaded terminal and list python processes.
# v.1 - combine all manually
# moler_conn = ObservableConnection()
# terminal = ThreadedTerminal(moler_connection=moler_conn)
# v.2 - let factory combine
terminal = get_connection(io_type='terminal', variant='threaded')
# v.3 - let factory select default variant
# terminal = get_connection(io_type='terminal')
with terminal.open():
    # Execute `ps -ef`; calling the command object blocks and returns parsed rows.
    ps_cmd = Ps(connection=terminal.moler_connection, options="-ef")
    processes = ps_cmd()
    for proc in processes:
        # Report every process whose command line mentions python.
        if 'python' in proc['CMD']:
            print("PID: {} CMD: {}".format(proc['PID'], proc['CMD']))
# sample output:
"""
PID: 1817 CMD: /usr/bin/python /usr/share/system-config-printer/applet.py
PID: 21825 CMD: /usr/bin/python /home/gl/moler/examples/command/unix_ps.py
"""
| 35.64
| 76
| 0.728395
|
from moler.cmd.unix.ps import Ps
from moler.observable_connection import ObservableConnection, get_connection
from moler.io.raw.terminal import ThreadedTerminal
# Build a threaded local-terminal connection via moler's connection factory.
terminal = get_connection(io_type='terminal', variant='threaded')
with terminal.open():
    # Execute `ps -ef`; calling the command object blocks and returns parsed rows.
    ps_cmd = Ps(connection=terminal.moler_connection, options="-ef")
    processes = ps_cmd()
    for proc in processes:
        # Report every process whose command line mentions python.
        if 'python' in proc['CMD']:
            print("PID: {} CMD: {}".format(proc['PID'], proc['CMD']))
| true
| true
|
f7045b7726258ee50846aef00faea6ad2f193365
| 5,213
|
py
|
Python
|
hym/ac.py
|
AugustUnderground/oaceis
|
73abc3b9703b84322764d2a40def915d8c1e69a7
|
[
"MIT"
] | null | null | null |
hym/ac.py
|
AugustUnderground/oaceis
|
73abc3b9703b84322764d2a40def915d8c1e69a7
|
[
"MIT"
] | null | null | null |
hym/ac.py
|
AugustUnderground/oaceis
|
73abc3b9703b84322764d2a40def915d8c1e69a7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0xde71c936
# Compiled with Coconut version 2.0.0-a_dev33 [How Not to Be Seen]
# Coconut Header: -------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os as _coconut_os
_coconut_file_dir = _coconut_os.path.dirname(_coconut_os.path.abspath(__file__))
_coconut_cached_module = _coconut_sys.modules.get(str("__coconut__"))
if _coconut_cached_module is not None and _coconut_os.path.dirname(_coconut_cached_module.__file__) != _coconut_file_dir: # type: ignore
del _coconut_sys.modules[str("__coconut__")]
_coconut_sys.path.insert(0, _coconut_file_dir)
_coconut_module_name = _coconut_os.path.splitext(_coconut_os.path.basename(_coconut_file_dir))[0]
if _coconut_module_name and _coconut_module_name[0].isalpha() and all(c.isalpha() or c.isdigit() for c in _coconut_module_name) and "__init__.py" in _coconut_os.listdir(_coconut_file_dir):
_coconut_full_module_name = str(_coconut_module_name + ".__coconut__")
import __coconut__ as _coconut__coconut__
_coconut__coconut__.__name__ = _coconut_full_module_name
for _coconut_v in vars(_coconut__coconut__).values():
if getattr(_coconut_v, "__module__", None) == str("__coconut__"):
try:
_coconut_v.__module__ = _coconut_full_module_name
except AttributeError:
_coconut_v_type = type(_coconut_v)
if getattr(_coconut_v_type, "__module__", None) == str("__coconut__"):
_coconut_v_type.__module__ = _coconut_full_module_name
_coconut_sys.modules[_coconut_full_module_name] = _coconut__coconut__
from __coconut__ import *
from __coconut__ import _coconut_tail_call, _coconut_tco, _coconut_call_set_names, _coconut_handle_cls_kwargs, _coconut_handle_cls_stargs, _namedtuple_of, _coconut, _coconut_MatchError, _coconut_iter_getitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_forward_dubstar_compose, _coconut_back_dubstar_compose, _coconut_pipe, _coconut_star_pipe, _coconut_dubstar_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_back_dubstar_pipe, _coconut_none_pipe, _coconut_none_star_pipe, _coconut_none_dubstar_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial, _coconut_get_function_match_error, _coconut_base_pattern_func, _coconut_addpattern, _coconut_sentinel, _coconut_assert, _coconut_mark_as_match, _coconut_reiterable, _coconut_self_match_types, _coconut_dict_merge, _coconut_exec, _coconut_comma_op, _coconut_multi_dim_arr
_coconut_sys.path.pop(0)
# Compiled Coconut: -----------------------------------------------------------
from argparse import ArgumentParser
from collections import namedtuple
if _coconut_sys.version_info < (3, 3):
from collections import Iterable
else:
from collections.abc import Iterable
import hace
# Command-line options for the ACE evaluation server.
parser = ArgumentParser()
parser.add_argument("--host", type=str, default="localhost", help="Host address")
# NOTE: the default was previously the string "6006"; argparse only converted it
# because string defaults are re-parsed with `type`. An int literal is explicit.
parser.add_argument("-p", "--port", type=int, default=6006, help="Server Port")
parser.add_argument("-e", "--env", type=str, default="op2", help="ACE Environment ID, see GACE doc for what's available")
parser.add_argument("-n", "--num", type=int, default=1, help="Number of Pooled Envs")
parser.add_argument("--pdk", type=str, default="xh035-3V3", help="ACE backend, see GACE doc for what's available")
@_coconut_tco
def isiterable(obj):
    """Return True if *obj* is an Iterable (used to detect env pools vs single envs)."""
    return _coconut_tail_call(isinstance, obj, Iterable)
def make_env(env_id, #type: str
    backend, #type: str
    num=1 #type: int
    ):
    """Create one ACE environment, or a pool of `num` identical envs when num > 1."""
    env = (hace.make_env(env_id, backend) if num == 1 else hace.make_same_env_pool(num, env_id, backend))
    return env
def simulate_pool(envs, sizings):
    """Evaluate an environment pool with per-index sizing dicts.

    Keys of *sizings* are coerced to int before being handed to hace.
    """
    int_keyed = {int(idx): params for idx, params in sizings.items()}
    return hace.evaluate_circuit_pool(envs, int_keyed)
def simulate_single(env, sizing):
    """Evaluate a single circuit environment with the given sizing."""
    return hace.evaluate_circuit(env, sizing)
def simulate(env, sizing):
    """Dispatch to pooled or single-env evaluation depending on env type."""
    if isiterable(env):
        return simulate_pool(env, sizing)
    return simulate_single(env, sizing)
def performance(env):
    """Return the current performance of a single env or an env pool."""
    if isiterable(env):
        return hace.current_performance_pool(env)
    return hace.current_performance(env)
def sizing(env):
    """Return the current sizing of a single env or an env pool."""
    getter = hace.current_sizing_pool if isiterable(env) else hace.current_sizing
    return getter(env)
def performance_parameters(env):
    """Return the performance identifiers wrapped in a {'params': ...} dict."""
    if isiterable(env):
        idents = hace.performance_identifiers_pool(env)
    else:
        idents = hace.performance_identifiers(env)
    return {"params": idents}
def sizing_parameters(env):
    """Return the sizing identifiers wrapped in a {'params': ...} dict."""
    if isiterable(env):
        idents = hace.sizing_identifiers_pool(env)
    else:
        idents = hace.sizing_identifiers(env)
    return {"params": idents}
def initial_sizing(env):
    """Return hace's initial sizing for a single env or an env pool."""
    fn = hace.initial_sizing_pool if isiterable(env) else hace.initial_sizing
    return fn(env)
def random_sizing(env):
    """Return a random sizing sample for a single env or an env pool."""
    fn = hace.random_sizing_pool if isiterable(env) else hace.random_sizing
    return fn(env)
| 46.544643
| 987
| 0.750815
|
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os as _coconut_os
_coconut_file_dir = _coconut_os.path.dirname(_coconut_os.path.abspath(__file__))
_coconut_cached_module = _coconut_sys.modules.get(str("__coconut__"))
if _coconut_cached_module is not None and _coconut_os.path.dirname(_coconut_cached_module.__file__) != _coconut_file_dir: del _coconut_sys.modules[str("__coconut__")]
_coconut_sys.path.insert(0, _coconut_file_dir)
_coconut_module_name = _coconut_os.path.splitext(_coconut_os.path.basename(_coconut_file_dir))[0]
if _coconut_module_name and _coconut_module_name[0].isalpha() and all(c.isalpha() or c.isdigit() for c in _coconut_module_name) and "__init__.py" in _coconut_os.listdir(_coconut_file_dir):
_coconut_full_module_name = str(_coconut_module_name + ".__coconut__")
import __coconut__ as _coconut__coconut__
_coconut__coconut__.__name__ = _coconut_full_module_name
for _coconut_v in vars(_coconut__coconut__).values():
if getattr(_coconut_v, "__module__", None) == str("__coconut__"):
try:
_coconut_v.__module__ = _coconut_full_module_name
except AttributeError:
_coconut_v_type = type(_coconut_v)
if getattr(_coconut_v_type, "__module__", None) == str("__coconut__"):
_coconut_v_type.__module__ = _coconut_full_module_name
_coconut_sys.modules[_coconut_full_module_name] = _coconut__coconut__
from __coconut__ import *
from __coconut__ import _coconut_tail_call, _coconut_tco, _coconut_call_set_names, _coconut_handle_cls_kwargs, _coconut_handle_cls_stargs, _namedtuple_of, _coconut, _coconut_MatchError, _coconut_iter_getitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_forward_dubstar_compose, _coconut_back_dubstar_compose, _coconut_pipe, _coconut_star_pipe, _coconut_dubstar_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_back_dubstar_pipe, _coconut_none_pipe, _coconut_none_star_pipe, _coconut_none_dubstar_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial, _coconut_get_function_match_error, _coconut_base_pattern_func, _coconut_addpattern, _coconut_sentinel, _coconut_assert, _coconut_mark_as_match, _coconut_reiterable, _coconut_self_match_types, _coconut_dict_merge, _coconut_exec, _coconut_comma_op, _coconut_multi_dim_arr
_coconut_sys.path.pop(0)
from argparse import ArgumentParser
from collections import namedtuple
if _coconut_sys.version_info < (3, 3):
from collections import Iterable
else:
from collections.abc import Iterable
import hace
# Command-line options for the ACE evaluation server.
parser = ArgumentParser()
parser.add_argument("--host", type=str, default="localhost", help="Host address")
# NOTE: the default was previously the string "6006"; argparse only converted it
# because string defaults are re-parsed with `type`. An int literal is explicit.
parser.add_argument("-p", "--port", type=int, default=6006, help="Server Port")
parser.add_argument("-e", "--env", type=str, default="op2", help="ACE Environment ID, see GACE doc for what's available")
parser.add_argument("-n", "--num", type=int, default=1, help="Number of Pooled Envs")
parser.add_argument("--pdk", type=str, default="xh035-3V3", help="ACE backend, see GACE doc for what's available")
@_coconut_tco
def isiterable(obj):
    """Return True if *obj* is an Iterable (used to detect env pools vs single envs)."""
    return _coconut_tail_call(isinstance, obj, Iterable)
def make_env(env_id, backend, num=1 ):
    """Create one ACE environment, or a pool of `num` identical envs when num > 1."""
    env = (hace.make_env(env_id, backend) if num == 1 else hace.make_same_env_pool(num, env_id, backend))
    return env
def simulate_pool(envs, sizings ):
    """Evaluate an env pool; sizing dict keys are coerced to int first."""
    sizing = dict(((int(i)), (s)) for i, s in sizings.items())
    perf = hace.evaluate_circuit_pool(envs, sizing)
    return perf
def simulate_single(env, sizing ):
    """Evaluate a single circuit environment with the given sizing."""
    perf = hace.evaluate_circuit(env, sizing)
    return perf
def simulate(env, sizing):
    """Dispatch to pooled or single-env evaluation depending on env type."""
    perf = (simulate_pool(env, sizing) if isiterable(env) else simulate_single(env, sizing))
    return perf
def performance(env):
    """Return the current performance of a single env or an env pool."""
    perf = ((hace.current_performance_pool if isiterable(env) else hace.current_performance))(env)
    return perf
def sizing(env):
    """Return the current sizing of a single env or an env pool."""
    size = ((hace.current_sizing_pool if isiterable(env) else hace.current_sizing))(env)
    return size
def performance_parameters(env):
    """Return the performance identifiers wrapped in a {'params': ...} dict."""
    pps = {"params": ((hace.performance_identifiers_pool if isiterable(env) else hace.performance_identifiers))(env)}
    return pps
def sizing_parameters(env):
    """Return the sizing identifiers wrapped in a {'params': ...} dict."""
    sps = {"params": ((hace.sizing_identifiers_pool if isiterable(env) else hace.sizing_identifiers))(env)}
    return sps
def initial_sizing(env):
    """Return hace's initial sizing for a single env or an env pool."""
    init = ((hace.initial_sizing_pool if isiterable(env) else hace.initial_sizing))(env)
    return init
def random_sizing(env):
    """Return a random sizing sample for a single env or an env pool."""
    rng = ((hace.random_sizing_pool if isiterable(env) else hace.random_sizing))(env)
    return rng
| true
| true
|
f7045cc997340a8708c325c5a56407dc3ecffd1d
| 1,604
|
py
|
Python
|
SS-GMNN-GraphMix/GraphMix-par/run_citeseer_ss.py
|
TAMU-VITA/SS-GCNs
|
644f8a5f3b507be6d59be02747be406fabd8b8f9
|
[
"MIT"
] | 1
|
2021-06-07T15:18:10.000Z
|
2021-06-07T15:18:10.000Z
|
SS-GMNN-GraphMix/GraphMix-par/run_citeseer_ss.py
|
TAMU-VITA/SS-GCNs
|
644f8a5f3b507be6d59be02747be406fabd8b8f9
|
[
"MIT"
] | null | null | null |
SS-GMNN-GraphMix/GraphMix-par/run_citeseer_ss.py
|
TAMU-VITA/SS-GCNs
|
644f8a5f3b507be6d59be02747be406fabd8b8f9
|
[
"MIT"
] | null | null | null |
import sys
import os
import copy
import json
import datetime
# Hyper-parameter configuration passed to every train.py run below.
opt = dict()
# Dataset and model settings
opt['dataset'] = '../data/citeseer'
opt['hidden_dim'] = 16
opt['input_dropout'] = 0.5
opt['dropout'] = 0
opt['optimizer'] = 'adam'
opt['lr'] = 0.01
opt['decay'] = 5e-4
opt['self_link_weight'] = 1.0
opt['pre_epoch'] = 2000
opt['epoch'] = 100
opt['iter'] = 1
opt['use_gold'] = 1
opt['draw'] = 'smp'
opt['tau'] = 0.0
opt['save'] = 'exp_citeseer'
opt['mixup_alpha'] =1.0
# Self-supervision settings; overwritten per configuration in the sweep below.
opt['partition_num'] = 0
opt['task_ratio'] = 0
### ict hyperparameters ###
opt['ema_decay'] = 0.999
opt['consistency_type'] = "mse"
opt['consistency_rampup_starts'] = 500
opt['consistency_rampup_ends'] = 1000
opt['mixup_consistency'] = 10.0
def generate_command(opt):
    """Build the ``python3 train.py`` invocation string from an options dict.

    Each key/value pair becomes a ``--key value`` argument, in dict
    insertion order.
    """
    # NOTE: the original loop variable shadowed the `opt` parameter
    # (`for opt, val in opt.items()`); use distinct names instead.
    parts = ['python3 train.py']
    parts.extend('--' + key + ' ' + str(val) for key, val in opt.items())
    return ' '.join(parts)
def run(opt):
    """Launch one blocking training run (via os.system) with the given options."""
    # Deep-copy so the shared `opt` dict cannot be mutated by command generation.
    opt_ = copy.deepcopy(opt)
    os.system(generate_command(opt_))
# Reset the result accumulator files before starting the sweep.
os.system('rm record.txt')
os.system('echo -n -> record.txt')
os.system('rm record_val.txt')
os.system('echo -n -> record_val.txt')
# Grid of partition counts and task ratios to sweep over.
partition_num_list = [8,9,10,11,12,13,14,15,16]
task_ratio_list = [0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
for p in partition_num_list:
    for t in task_ratio_list:
        # Fresh per-configuration record file.
        os.system('rm record.txt')
        os.system('echo -n -> record.txt')
        opt['partition_num'] = p
        opt['task_ratio'] = t
        # 10 repetitions with seeds 1..10.
        for k in range(10):
            seed = k + 1
            opt['seed'] = seed
            run(opt)
        # Aggregate the 10 runs, then append this (p, t) pair to the log.
        os.system('python result_cal.py')
        with open('record_val.txt', 'a') as f:
            f.write(str(p) + ' ' + str(t) + '\n')
| 21.675676
| 51
| 0.598504
|
import sys
import os
import copy
import json
import datetime
# Hyper-parameter configuration passed to every train.py run below.
opt = dict()
# Dataset and model settings
opt['dataset'] = '../data/citeseer'
opt['hidden_dim'] = 16
opt['input_dropout'] = 0.5
opt['dropout'] = 0
opt['optimizer'] = 'adam'
opt['lr'] = 0.01
opt['decay'] = 5e-4
opt['self_link_weight'] = 1.0
opt['pre_epoch'] = 2000
opt['epoch'] = 100
opt['iter'] = 1
opt['use_gold'] = 1
opt['draw'] = 'smp'
opt['tau'] = 0.0
opt['save'] = 'exp_citeseer'
opt['mixup_alpha'] =1.0
# Self-supervision settings; overwritten per configuration in the sweep below.
opt['partition_num'] = 0
opt['task_ratio'] = 0
# ICT / mean-teacher consistency hyper-parameters
opt['ema_decay'] = 0.999
opt['consistency_type'] = "mse"
opt['consistency_rampup_starts'] = 500
opt['consistency_rampup_ends'] = 1000
opt['mixup_consistency'] = 10.0
def generate_command(opt):
    """Assemble the ``python3 train.py`` command line from an options dict."""
    pieces = ['python3 train.py']
    for name, value in opt.items():
        pieces.append('--' + name + ' ' + str(value))
    return ' '.join(pieces)
def run(opt):
    """Launch one blocking training run (via os.system) with the given options."""
    # Deep-copy so the shared `opt` dict cannot be mutated by command generation.
    opt_ = copy.deepcopy(opt)
    os.system(generate_command(opt_))
# Reset the result accumulator files before starting the sweep.
os.system('rm record.txt')
os.system('echo -n -> record.txt')
os.system('rm record_val.txt')
os.system('echo -n -> record_val.txt')
# Grid of partition counts and task ratios to sweep over.
partition_num_list = [8,9,10,11,12,13,14,15,16]
task_ratio_list = [0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
for p in partition_num_list:
    for t in task_ratio_list:
        # Fresh per-configuration record file.
        os.system('rm record.txt')
        os.system('echo -n -> record.txt')
        opt['partition_num'] = p
        opt['task_ratio'] = t
        # 10 repetitions with seeds 1..10.
        for k in range(10):
            seed = k + 1
            opt['seed'] = seed
            run(opt)
        # Aggregate the 10 runs, then append this (p, t) pair to the log.
        os.system('python result_cal.py')
        with open('record_val.txt', 'a') as f:
            f.write(str(p) + ' ' + str(t) + '\n')
| true
| true
|
f7045d94952b05c34c83c62669bb8a4442772b67
| 12,907
|
py
|
Python
|
Core/hippoSeg/LiviaNet/startTraining.py
|
YongLiuLab/BrainRadiomicsTools
|
19b440acd554ee920857c306442b6d2c411dca88
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 10
|
2019-09-26T03:12:52.000Z
|
2022-02-25T06:05:38.000Z
|
Core/hippoSeg/LiviaNet/startTraining.py
|
YongLiuLab/BrainRadiomicsTools
|
19b440acd554ee920857c306442b6d2c411dca88
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
Core/hippoSeg/LiviaNet/startTraining.py
|
YongLiuLab/BrainRadiomicsTools
|
19b440acd554ee920857c306442b6d2c411dca88
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 8
|
2020-02-26T01:54:48.000Z
|
2022-03-19T01:23:55.000Z
|
"""
Copyright (c) 2016, Jose Dolz .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. Dec, 2016.
email: jose.dolz.upv@gmail.com
LIVIA Department, ETS, Montreal.
"""
import os
import numpy as np
from Modules.IO.sampling import getSamplesSubepoch
from Modules.General.Utils import dump_model_to_gzip_file
from Modules.General.Utils import getImagesSet
from Modules.General.Utils import load_model_from_gzip_file
from Modules.Parsers.parsersUtils import parserConfigIni
from startTesting import segmentVolume
def startTraining(networkModelName,configIniName):
    """Train a previously-created LiviaNet3D model.

    Reads section 1 of *configIniName*, splits the configured subject indexes
    with leave-one-out cross validation, then runs the configured number of
    epochs/sub-epochs of training with per-epoch validation segmentation and
    saves the network under outputFiles/<folder>/Networks after every epoch.

    :param networkModelName: path to a gzip-pickled LiviaNet3D model.
    :param configIniName: path to the training configuration ini file.
    """
    print (" ************************************************ STARTING TRAINING **************************************************")
    print (" ********************** Starting training model (Reading parameters) **********************")
    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName,1)
    # Image type (0: Nifti, 1: Matlab)
    imageType = myParserConfigIni.imageTypesTrain
    print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print ("-------- Reading Images names used in training/validation -------------")
    # NOTE(review): earlier commented-out experiments with KFold / LeavePOut /
    # train_test_split were removed here; only the LeaveOneOut split is active.
    # sklearn.cross_validation is the pre-0.20 module path — confirm the
    # pinned scikit-learn version still provides it.
    from sklearn.cross_validation import LeaveOneOut
    loo = LeaveOneOut(4)
    y1 = myParserConfigIni.indexesForTraining
    # NOTE(review): x1 is assigned but unused; the validation indexes actually
    # used come from the leave-one-out split of y1 below.
    x1 = myParserConfigIni.indexesForValidation
    # NOTE(review): only the LAST leave-one-out split survives this loop; all
    # training below uses that final (y, x) pair — confirm this is intended.
    for train_index, test_index in loo:
        print("TRAIN:", train_index, "TEST:", test_index)
        y, x = np.array(y1)[train_index], np.array(y1)[test_index]
    # -- Get list of images used for training -- #
    (imageNames_Train, names_Train) = getImagesSet(myParserConfigIni.imagesFolder,y)  # Images
    (groundTruthNames_Train, gt_names_Train) = getImagesSet(myParserConfigIni.GroundTruthFolder,y)  # Ground truth
    (roiNames_Train, roi_names_Train) = getImagesSet(myParserConfigIni.ROIFolder,y)  # ROI
    # -- Get list of images used for validation -- #
    (imageNames_Val, names_Val) = getImagesSet(myParserConfigIni.imagesFolder,x)  # Images
    (groundTruthNames_Val, gt_names_Val) = getImagesSet(myParserConfigIni.GroundTruthFolder,x)  # Ground truth
    (roiNames_Val, roi_names_Val) = getImagesSet(myParserConfigIni.ROIFolder,x)  # ROI
    # Print names
    print (" ================== Images for training ================")
    for i in range(0,len(names_Train)):
        if len(roi_names_Train) > 0:
            print(" Image({}): {} | GT: {} | ROI {} ".format(i,names_Train[i], gt_names_Train[i], roi_names_Train[i] ))
        else:
            print(" Image({}): {} | GT: {} ".format(i,names_Train[i], gt_names_Train[i] ))
    print (" ================== Images for validation ================")
    for i in range(0,len(names_Val)):
        if len(roi_names_Train) > 0:
            print(" Image({}): {} | GT: {} | ROI {} ".format(i,names_Val[i], gt_names_Val[i], roi_names_Val[i] ))
        else:
            print(" Image({}): {} | GT: {} ".format(i,names_Val[i], gt_names_Val[i]))
    print (" ===============================================================")
    # --------------- Load my LiviaNet3D object  ---------------
    print (" ... Loading model from {}".format(networkModelName))
    myLiviaNet3D = load_model_from_gzip_file(networkModelName)
    print (" ... Network architecture successfully loaded....")
    # Assign the (possibly updated) training schedule from the config to the loaded net
    myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
    myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
    myLiviaNet3D.numberOfSamplesSupEpoch = myParserConfigIni.numberOfSamplesSupEpoch
    myLiviaNet3D.firstEpochChangeLR = myParserConfigIni.firstEpochChangeLR
    myLiviaNet3D.frequencyChangeLR = myParserConfigIni.frequencyChangeLR
    numberOfEpochs = myLiviaNet3D.numberOfEpochs
    numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
    numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
    # --------------- --------------  ---------------
    # ---------------  Start TRAINING ---------------
    # --------------- --------------  ---------------
    # Get sample dimension values
    receptiveField = myLiviaNet3D.receptiveField
    sampleSize_Train = myLiviaNet3D.sampleSize_Train
    trainingCost = []
    if myParserConfigIni.applyPadding == 1:
        applyPadding = True
    else:
        applyPadding = False
    learningRateModifiedEpoch = 0
    # Run over all the (remaining) epochs and subepochs
    for e_i in xrange(numberOfEpochs):
        # Recover last trained epoch
        numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
        print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))
        costsOfEpoch = []
        for subE_i in xrange(numberOfSubEpochs):
            epoch_nr = subE_i+1
            print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))
            # Get all the samples that will be used in this sub-epoch
            [imagesSamplesAll,
            gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
                                                imageNames_Train,
                                                groundTruthNames_Train,
                                                roiNames_Train,
                                                imageType,
                                                sampleSize_Train,
                                                receptiveField,
                                                applyPadding
                                                )
            # Variable that will contain weights for the cost function
            # --- In its current implementation, all the classes have the same weight
            weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
            numberBatches = len(imagesSamplesAll) / myLiviaNet3D.batch_Size
            # Load the sub-epoch samples into the shared theano variables
            myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
            myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
            costsOfBatches = []
            evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
            for b_i in xrange(numberBatches):
                # TODO: Make a line that adds a point at each trained batch (Or percentage being updated)
                costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
                meanBatchCostError = costErrors[0]
                costsOfBatches.append(meanBatchCostError)
                myLiviaNet3D.updateLayersMatricesBatchNorm()
            #======== Calculate and Report accuracy over subepoch
            meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
            print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
            # Release data (replace the shared buffers with tiny dummies)
            myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
            myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))
            # Get mean cost epoch
            costsOfEpoch.append(meanCostOfSubepoch)
        meanCostOfEpoch = sum(costsOfEpoch) / float(numberOfSubEpochs)
        # Include the epoch cost to the main training cost and update current mean
        trainingCost.append(meanCostOfEpoch)
        # NOTE(review): float(str(...)) is redundant but harmless here.
        currentMeanCost = sum(trainingCost) / float(str( e_i + 1))
        print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
        print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
        print(" -------------------------------------------------------- " )
        # ------------- Update Learning Rate if required ----------------#
        # Halve the LR at firstEpochChangeLR, then every frequencyChangeLR epochs.
        if e_i >= myLiviaNet3D.firstEpochChangeLR :
            if learningRateModifiedEpoch == 0:
                currentLR = myLiviaNet3D.learning_rate.get_value()
                newLR = currentLR / 2.0
                myLiviaNet3D.learning_rate.set_value(newLR)
                print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                learningRateModifiedEpoch = e_i
            else:
                if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
                    currentLR = myLiviaNet3D.learning_rate.get_value()
                    newLR = currentLR / 2.0
                    myLiviaNet3D.learning_rate.set_value(newLR)
                    print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                    learningRateModifiedEpoch = e_i
        # ---------------------- Start validation ---------------------- #
        numberImagesToSegment = len(imageNames_Val)
        print(" ********************** Starting validation **********************")
        # Run over the images to segment
        for i_d in xrange(numberImagesToSegment) :
            print("-------------   Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
            strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
            segmentVolume(myLiviaNet3D,
                          i_d,
                          imageNames_Val,  # Full path
                          names_Val,       # Only image name
                          groundTruthNames_Val,
                          roiNames_Val,
                          imageType,
                          applyPadding,
                          receptiveField,
                          sampleSize_Train,
                          strideValues,
                          myLiviaNet3D.batch_Size,
                          0 # Validation (0) or testing (1)
                          )
        print(" ********************** Validation DONE ********************** ")
        # ------ In this point the training is done at Epoch n ---------#
        # Increase number of epochs trained
        myLiviaNet3D.numberOfEpochsTrained += 1
        # --------------- Save the model ---------------
        BASE_DIR = os.getcwd()
        path_Temp = os.path.join(BASE_DIR,'outputFiles')
        netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
        netFolderName = os.path.join(netFolderName,'Networks')
        modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
        strFinal = " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        print (strFinal)
    print("................ The whole Training is done.....")
    print(" ************************************************************************************ ")
| 48.340824
| 153
| 0.581777
|
import os
import numpy as np
from Modules.IO.sampling import getSamplesSubepoch
from Modules.General.Utils import dump_model_to_gzip_file
from Modules.General.Utils import getImagesSet
from Modules.General.Utils import load_model_from_gzip_file
from Modules.Parsers.parsersUtils import parserConfigIni
from startTesting import segmentVolume
def startTraining(networkModelName,configIniName):
    """Train a previously created LiviaNet3D model with leave-one-out folds.

    Parameters
    ----------
    networkModelName : path to the gzip-pickled network to (re)train.
    configIniName    : path to the INI file holding the training settings.

    NOTE(review): Python-2 era code (``xrange``, integer ``/``) built on
    Theano-style shared variables and the ``sklearn.cross_validation``
    module that was removed in scikit-learn 0.20 -- confirm the runtime
    environment before reuse. Indentation below was reconstructed from a
    whitespace-mangled dump; fold/loop nesting should be confirmed against
    the original repository.
    """
    print (" ************************************************ STARTING TRAINING **************************************************")
    print (" ********************** Starting training model (Reading parameters) **********************")
    # Read all training parameters from the INI configuration file (mode 1 = training).
    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName,1)
    # On-disk format of the training images (value comes from the INI file).
    imageType = myParserConfigIni.imageTypesTrain
    print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print ("-------- Reading Images names used in training/validation -------------")
    # Leave-one-out split over the configured subject indexes.
    # NOTE(review): ``sklearn.cross_validation`` is long deprecated; the modern
    # equivalent is ``sklearn.model_selection.LeaveOneOut``.
    from sklearn.cross_validation import LeaveOneOut
    loo = LeaveOneOut(4)
    y1 = myParserConfigIni.indexesForTraining
    # NOTE(review): x1 is never used below -- every fold is drawn from y1 only.
    x1 = myParserConfigIni.indexesForValidation
    for train_index, test_index in loo:
        print("TRAIN:", train_index, "TEST:", test_index)
        # Both the training subjects and the held-out subject come from y1.
        y, x = np.array(y1)[train_index], np.array(y1)[test_index]
        # Resolve image / ground-truth / ROI file lists for the two splits.
        # (These six statements were fused onto two lines by comment stripping;
        # tokens are unchanged, only line breaks restored.)
        (imageNames_Train, names_Train) = getImagesSet(myParserConfigIni.imagesFolder,y)
        (groundTruthNames_Train, gt_names_Train) = getImagesSet(myParserConfigIni.GroundTruthFolder,y)
        (roiNames_Train, roi_names_Train) = getImagesSet(myParserConfigIni.ROIFolder,y)
        (imageNames_Val, names_Val) = getImagesSet(myParserConfigIni.imagesFolder,x)
        (groundTruthNames_Val, gt_names_Val) = getImagesSet(myParserConfigIni.GroundTruthFolder,x)
        (roiNames_Val, roi_names_Val) = getImagesSet(myParserConfigIni.ROIFolder,x)
        print (" ================== Images for training ================")
        for i in range(0,len(names_Train)):
            if len(roi_names_Train) > 0:
                print(" Image({}): {} | GT: {} | ROI {} ".format(i,names_Train[i], gt_names_Train[i], roi_names_Train[i] ))
            else:
                print(" Image({}): {} | GT: {} ".format(i,names_Train[i], gt_names_Train[i] ))
        print (" ================== Images for validation ================")
        for i in range(0,len(names_Val)):
            # NOTE(review): this checks the *training* ROI list while printing
            # validation entries -- presumably should be roi_names_Val.
            if len(roi_names_Train) > 0:
                print(" Image({}): {} | GT: {} | ROI {} ".format(i,names_Val[i], gt_names_Val[i], roi_names_Val[i] ))
            else:
                print(" Image({}): {} | GT: {} ".format(i,names_Val[i], gt_names_Val[i]))
        print (" ===============================================================")
        print (" ... Loading model from {}".format(networkModelName))
        myLiviaNet3D = load_model_from_gzip_file(networkModelName)
        print (" ... Network architecture successfully loaded....")
        # Override the persisted hyper-parameters with the INI settings.
        myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
        myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
        myLiviaNet3D.numberOfSamplesSupEpoch = myParserConfigIni.numberOfSamplesSupEpoch
        myLiviaNet3D.firstEpochChangeLR = myParserConfigIni.firstEpochChangeLR
        myLiviaNet3D.frequencyChangeLR = myParserConfigIni.frequencyChangeLR
        numberOfEpochs = myLiviaNet3D.numberOfEpochs
        numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
        numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
        receptiveField = myLiviaNet3D.receptiveField
        sampleSize_Train = myLiviaNet3D.sampleSize_Train
        trainingCost = []
        if myParserConfigIni.applyPadding == 1:
            applyPadding = True
        else:
            applyPadding = False
        learningRateModifiedEpoch = 0
        # ---------------- Main epoch loop ----------------
        for e_i in xrange(numberOfEpochs):
            numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
            print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))
            costsOfEpoch = []
            for subE_i in xrange(numberOfSubEpochs):
                epoch_nr = subE_i+1
                print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))
                # Draw a fresh set of training samples for this sub-epoch.
                [imagesSamplesAll,
                gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
                                                   imageNames_Train,
                                                   groundTruthNames_Train,
                                                   roiNames_Train,
                                                   imageType,
                                                   sampleSize_Train,
                                                   receptiveField,
                                                   applyPadding
                                                   )
                # Uniform class weighting of the cost function.
                weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
                # NOTE(review): relies on Py2 integer division; under Py3 this
                # float would break the xrange() below.
                numberBatches = len(imagesSamplesAll) / myLiviaNet3D.batch_Size
                # Push the sub-epoch samples into the network's shared variables.
                myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
                myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
                costsOfBatches = []
                evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
                for b_i in xrange(numberBatches):
                    # One gradient step; element 0 of the returned list is the mean batch cost.
                    costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
                    meanBatchCostError = costErrors[0]
                    costsOfBatches.append(meanBatchCostError)
                    myLiviaNet3D.updateLayersMatricesBatchNorm()
                meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
                print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
                # Replace the sample tensors with tiny placeholders to release memory.
                myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
                myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))
                costsOfEpoch.append(meanCostOfSubepoch)
            meanCostOfEpoch = sum(costsOfEpoch) / float(numberOfSubEpochs)
            trainingCost.append(meanCostOfEpoch)
            # NOTE(review): float(str(...)) is a redundant round-trip of e_i + 1.
            currentMeanCost = sum(trainingCost) / float(str( e_i + 1))
            print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
            print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
            print(" -------------------------------------------------------- " )
            # Halve the learning rate the first time firstEpochChangeLR is reached,
            # then again every frequencyChangeLR epochs afterwards.
            if e_i >= myLiviaNet3D.firstEpochChangeLR :
                if learningRateModifiedEpoch == 0:
                    currentLR = myLiviaNet3D.learning_rate.get_value()
                    newLR = currentLR / 2.0
                    myLiviaNet3D.learning_rate.set_value(newLR)
                    print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                    learningRateModifiedEpoch = e_i
                else:
                    if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
                        currentLR = myLiviaNet3D.learning_rate.get_value()
                        newLR = currentLR / 2.0
                        myLiviaNet3D.learning_rate.set_value(newLR)
                        print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                        learningRateModifiedEpoch = e_i
            # ---------------- Per-epoch validation: segment every held-out subject ----------------
            numberImagesToSegment = len(imageNames_Val)
            print(" ********************** Starting validation **********************")
            for i_d in xrange(numberImagesToSegment) :
                print("------------- Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
                strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
                segmentVolume(myLiviaNet3D,
                              i_d,
                              imageNames_Val, names_Val, groundTruthNames_Val,
                              roiNames_Val,
                              imageType,
                              applyPadding,
                              receptiveField,
                              sampleSize_Train,
                              strideValues,
                              myLiviaNet3D.batch_Size,
                              0 )  # 0 = validation mode (1 would mean testing)
            print(" ********************** Validation DONE ********************** ")
            myLiviaNet3D.numberOfEpochsTrained += 1
            # ---------------- Persist a checkpoint for this epoch ----------------
            BASE_DIR = os.getcwd()
            path_Temp = os.path.join(BASE_DIR,'outputFiles')
            netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
            netFolderName = os.path.join(netFolderName,'Networks')
            modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
            dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
            strFinal = " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
            print (strFinal)
    print("................ The whole Training is done.....")
    print(" ************************************************************************************ ")
| true
| true
|
f7045e707bad5fe79cb0eae215451a05a660f48a
| 13,388
|
py
|
Python
|
potion/envs/minigolf.py
|
T3p/policy-optimization
|
77006545779823737c4ca3b19e9d80506015c132
|
[
"MIT"
] | null | null | null |
potion/envs/minigolf.py
|
T3p/policy-optimization
|
77006545779823737c4ca3b19e9d80506015c132
|
[
"MIT"
] | null | null | null |
potion/envs/minigolf.py
|
T3p/policy-optimization
|
77006545779823737c4ca3b19e9d80506015c132
|
[
"MIT"
] | 1
|
2019-09-08T15:11:55.000Z
|
2019-09-08T15:11:55.000Z
|
from numbers import Number
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import math as m
from scipy.stats import norm
"""
Minigolf task.
References
----------
- Penner, A. R. "The physics of putting." Canadian Journal of Physics 80.2 (2002): 83-96.
"""
class MiniGolf(gym.Env):
    """One-dimensional minigolf putting environment.

    Based on Penner, "The physics of putting", Can. J. Phys. 80.2 (2002).

    State  : distance of the ball from the hole (negative once overshot).
    Action : putter "hit speed", clipped to [min_action, max_action / 2].
    Reward : -1 per putt while still short of the hole; -100 when the ball
             ends up below -4 (overshot too far); the episode terminates
             on any non-positive state.
    """

    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 30
    }

    def __init__(self):
        # Physical ranges of the task (positions in metres, actions in m/s).
        self.min_pos = 0.0
        self.max_pos = 20.0
        self.min_action = 1e-5
        self.max_action = 10.0
        self.putter_length = 1.0  # [0.7:1.0]
        self.friction = 0.131  # [0.065:0.196]
        self.hole_size = 0.10  # [0.10:0.15]
        self.sigma_noise = 0.3  # std-dev of the multiplicative putt noise
        self.ball_radius = 0.02135
        self.min_variance = 1e-2  # Minimum variance for computing the densities

        # gym attributes
        self.viewer = None
        low = np.array([self.min_pos])
        high = np.array([self.max_pos])
        self.action_space = spaces.Box(low=self.min_action,
                                       high=self.max_action,
                                       shape=(1,), dtype=float)
        self.observation_space = spaces.Box(low=low, high=high, dtype=float)

        # initialize state
        self.seed()
        self.reset()

    def setParams(self, env_param):
        """Overwrite the environment parameters; env_param[-1] is the noise variance."""
        self.putter_length = env_param[0]
        self.friction = env_param[1]
        self.hole_size = env_param[2]
        self.sigma_noise = m.sqrt(env_param[-1])

    def step(self, action, render=False):
        """Apply one putt; returns (next_state, reward, done, info)."""
        action = np.clip(action, self.min_action, self.max_action / 2)
        # Rejection-sample the multiplicative noise until |noise| <= 1
        # (keeps the initial ball speed non-negative).
        noise = 10
        while abs(noise) > 1:
            noise = self.np_random.randn() * self.sigma_noise
        u = action * self.putter_length * (1 + noise)
        # Constant deceleration from rolling friction: 5/7 * mu * g.
        deceleration = 5 / 7 * self.friction * 9.81
        t = u / deceleration
        # Ball travels u^2 / (2 * deceleration) towards the hole.
        xn = self.state - u * t + 0.5 * deceleration * t ** 2
        # Reward/termination are evaluated on the *previous* state.
        reward = 0
        done = True
        if self.state > 0:
            reward = -1
            done = False
        elif self.state < -4:
            reward = -100
        self.state = xn
        # NOTE(review): info['state'] is the post-step state (same as the
        # returned observation), not the pre-step one.
        return self.get_state(), float(reward), done, {'state': self.get_state(), 'action': action, 'danger': float(self.state) < -4}

    # Custom param for transfer
    def getEnvParam(self):
        """Return [putter_length, friction, hole_size, noise variance] as arrays."""
        return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),
                           np.ravel(self.sigma_noise ** 2)])

    def reset(self, state=None):
        """Reset to a given state, or sample uniformly in [min_pos, max_pos]."""
        if state is None:
            self.state = np.array([self.np_random.uniform(low=self.min_pos,
                                                          high=self.max_pos)])
        else:
            self.state = np.array(state)
        return self.get_state()

    def get_state(self):
        # Returns a copy so callers cannot mutate the internal state.
        return np.array(self.state)

    def get_true_state(self):
        """For testing purposes"""
        return np.array(self.state)

    def clip_state(self, state):
        # Intentionally a no-op; clipping kept here as a reference.
        return state
        # return np.clip(state, self.min_pos, self.max_pos)

    def clip_action(self, action):
        # Intentionally a no-op; clipping kept here as a reference.
        return action
        # return np.clip(action, self.min_action, self.max_action)

    def seed(self, seed=None):
        """Seed the environment's private RNG (gym convention)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def getDensity_old(self, env_parameters, state, action, next_state):
        """Scalar transition density under the given parameters (legacy version).

        Inverts the deterministic dynamics to recover the noise value and
        evaluates its standard-normal pdf. Transitions that move the ball
        backwards (state < next_state) are impossible and get density 0.
        """
        if state < next_state:
            return 0
        action = np.clip(action, self.min_action, self.max_action / 2)
        action = 1e-8 if action == 0 else action  # avoid division by zero
        putter_length = env_parameters[0]
        friction = env_parameters[1]
        sigma_noise = env_parameters[-1]
        deceleration = 5 / 7 * friction * 9.81
        u = np.sqrt(2 * deceleration * (state - next_state))
        noise = (u / (action * putter_length) - 1) / sigma_noise
        return norm.pdf(noise)

    def density_old(self, env_parameters, state, action, next_state):
        """Vectorized legacy transition density for a batch of parameter vectors.

        :param env_parameters: list of env_params
        :param state: NxTx1xP
        :param action: NxTxP
        :param next_state: NxTx1xP
        :return: pdf NxTxn_param
        """
        assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4
        mask = state < next_state  # marks impossible (backward) transitions
        action = np.clip(action, self.min_action, self.max_action / 2)
        action[action == 0] = 1e-8  # NOTE(review): mutates the caller's array in place
        pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))
        diff = np.abs(state - next_state)  # take the abs for the sqrt, but mask negative values later
        for i in range(env_parameters.shape[0]):
            deceleration = 5 / 7 * env_parameters[i, 1] * 9.81
            u = np.sqrt(2 * deceleration * diff[:, :, :, i])
            noise = (u / (action[:, :, np.newaxis, i] * env_parameters[i, 0]) - 1) / env_parameters[i, -1]
            pdf[:, :, :, i] = norm.pdf(noise) * (1 - mask[:, :, :, i])  # set to zero impossible transitions
        return pdf[:, :, 0, :]

    def densityCurrent_old(self, state, action, next_state):
        """Legacy transition density under the *current* environment parameters.

        :param state: NxTx1
        :param action: NxT
        :param next_state: NxTx1
        :return: pdf NxT
        """
        assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3
        mask = state < next_state  # marks impossible (backward) transitions
        action = np.clip(action, self.min_action, self.max_action / 2)
        action[action == 0] = 1e-8  # NOTE(review): mutates the caller's array in place
        diff = np.abs(state - next_state)  # take the abs for the sqrt, but mask negative values later
        deceleration = 5 / 7 * self.friction * 9.81
        u = np.sqrt(2 * deceleration * diff)
        noise = (u / (action[:, :, np.newaxis] * self.putter_length) - 1) / self.sigma_noise
        pdf = norm.pdf(noise) * (1 - mask)  # set to zero impossible transitions
        return pdf[:, :, 0]

    def stepDenoisedCurrent_old(self, state, action):
        """Deterministic (noise = 0) next state for NxTx1 states and NxT actions."""
        assert state.ndim == 3 and action.ndim == 2
        action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]
        u = action * self.putter_length
        deceleration = 5 / 7 * self.friction * 9.81
        t = u / deceleration
        return state - u * t + 0.5 * deceleration * t ** 2

    def stepDenoisedCurrent(self, state, action):
        """Mean next state under the noise model, for NxTx1 states and NxT actions.

        E[(1+eps)^2] = 1 + sigma^2, hence the (1 + sigma_noise**2) factor.
        """
        assert state.ndim == 3 and action.ndim == 2
        action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]
        u = action * self.putter_length
        deceleration = 5 / 7 * self.friction * 9.81
        return state - 0.5 * u ** 2 * (1 + self.sigma_noise ** 2) / deceleration

    def variance(self, action):
        """Next-state variance given the action (NxT); floored by min_variance."""
        assert action.ndim == 2
        deceleration = 5 / 7 * self.friction * 9.81
        action = np.clip(action, self.min_action, self.max_action / 2)
        # k = travelled distance for a noiseless putt of this action.
        k = action ** 2 * self.putter_length ** 2 / (2 * deceleration)
        return 2 * k ** 2 * self.sigma_noise ** 2 * (self.sigma_noise ** 2 + 2) + self.min_variance

    def densityCurrent(self, state, action, next_state):
        """Gaussian-approximated transition density under the current parameters.

        :param state: NxTx1
        :param action: NxT
        :param next_state: NxTx1
        :return: pdf NxT
        """
        assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3
        mean_ns = self.stepDenoisedCurrent(state, action)
        var_ns = self.variance(action)
        # NOTE(review): norm.pdf of a standardized residual; not divided by
        # sqrt(var), so this is proportional to (not equal to) the density.
        return norm.pdf((next_state - mean_ns)[:, :, 0] / np.sqrt(var_ns))

    def density(self, env_parameters, state, action, next_state):
        """Gaussian-approximated density for a batch of parameter vectors.

        :param env_parameters: array of env_params, one row per parameter set
        :param state: NxTx1xP
        :param action: NxTxP
        :param next_state: NxTx1xP
        :return: pdf NxTxn_param
        """
        assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4
        action = np.clip(action, self.min_action, self.max_action / 2)
        pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))
        for i in range(env_parameters.shape[0]):
            deceleration = 5 / 7 * env_parameters[i, 1] * 9.81
            k = action ** 2 * env_parameters[i, 0] ** 2 / (2 * deceleration)
            # Compute mean next-state
            mean_ns = state[:, :, :, i] - k[:, :, np.newaxis, i] * (1 + env_parameters[i, -1])
            # Compute variance next-state
            var_ns = 2 * k[:, :, np.newaxis, i] ** 2 * env_parameters[i, -1] * (
                    env_parameters[i, -1] + 2) + self.min_variance
            pdf[:, :, :, i] = norm.pdf((next_state[:, :, :, i] - mean_ns) / np.sqrt(var_ns))
        return pdf[:, :, 0, :]
class ComplexMiniGolf(gym.Env):
    """Minigolf variant with position-dependent friction.

    Friction grows linearly from friction_low at min_pos to friction_high
    at max_pos, so the same putt decelerates differently along the green.

    NOTE(review): unlike MiniGolf, __init__ never sets ``self.friction``;
    getEnvParam() reads it and will raise AttributeError unless setParams()
    was called first -- confirm intended call order.
    """

    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 30
    }

    def __init__(self):
        self.horizon = 20
        self.gamma = 0.99
        self.min_pos = 0.0
        self.max_pos = 20.0
        self.min_action = 1e-5
        self.max_action = 10.0
        self.putter_length = 1.0  # [0.7:1.0]
        # Friction endpoints for the linear position->friction map.
        self.friction_low = 0.131
        self.friction_high = 0.19  # 0.190
        self.hole_size = 0.10  # [0.10:0.15]
        self.sigma_noise = 0.3  # std-dev of the multiplicative putt noise
        self.ball_radius = 0.02135
        self.min_variance = 1e-2  # Minimum variance for computing the densities

        # gym attributes
        self.viewer = None
        low = np.array([self.min_pos])
        high = np.array([self.max_pos])
        self.action_space = spaces.Box(low=self.min_action,
                                       high=self.max_action,
                                       shape=(1,))
        self.observation_space = spaces.Box(low=low, high=high)

        # initialize state
        self.seed()
        self.reset()

    def setParams(self, env_param):
        """Overwrite the environment parameters; env_param[-1] is the noise variance."""
        self.putter_length = env_param[0]
        self.friction = env_param[1]
        self.hole_size = env_param[2]
        self.sigma_noise = m.sqrt(env_param[-1])

    def computeFriction(self, state):
        # Linear interpolation between friction_low and friction_high over
        # the position range (an earlier piecewise version was removed).
        delta_f = self.friction_high - self.friction_low
        delta_p = self.max_pos - self.min_pos
        return self.friction_low + (delta_f / delta_p) * state

    def step(self, action, render=False):
        """Apply one putt with position-dependent friction; returns (obs, reward, done, info)."""
        action = np.clip(action, self.min_action, self.max_action / 2)
        # Rejection-sample the multiplicative noise until |noise| <= 1.
        noise = 10
        while abs(noise) > 1:
            noise = self.np_random.randn() * self.sigma_noise
        u = action * self.putter_length * (1 + noise)
        # Friction (and hence deceleration) depends on the current position.
        friction = self.computeFriction(self.state)
        deceleration = 5 / 7 * friction * 9.81
        t = u / deceleration
        xn = self.state - u * t + 0.5 * deceleration * t ** 2
        # Reward/termination are evaluated on the *previous* state.
        reward = 0
        done = True
        if self.state > 0:
            reward = -1
            done = False
        elif self.state < -4:
            reward = -100
        state = self.state
        self.state = xn
        # TODO the last three values should not be used
        return self.get_state(), float(reward), done, {"state": state, "next_state": self.state, "action": action}

    # Custom param for transfer
    def getEnvParam(self):
        """Return [putter_length, friction, hole_size, noise variance] as arrays."""
        return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),
                           np.ravel(self.sigma_noise ** 2)])

    def reset(self, state=None):
        # TODO change reset
        if state is None:
            self.state = np.array([self.np_random.uniform(low=self.min_pos,
                                                          high=self.max_pos)])
        else:
            self.state = np.array(state)
        return self.get_state()

    def get_state(self):
        # Returns a copy so callers cannot mutate the internal state.
        return np.array(self.state)

    def get_true_state(self):
        """For testing purposes"""
        return np.array(self.state)

    def clip_state(self, state):
        # Intentionally a no-op; clipping kept here as a reference.
        return state
        # return np.clip(state, self.min_pos, self.max_pos)

    def clip_action(self, action):
        # Intentionally a no-op; clipping kept here as a reference.
        return action
        # return np.clip(action, self.min_action, self.max_action)

    def seed(self, seed=None):
        """Seed the environment's private RNG (gym convention)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reward(self, state, action, next_state):
        """Recompute (reward, done) from a transition by inverting the dynamics.

        u is the initial speed implied by the travelled distance; v_min is the
        speed needed to just reach the hole, v_max the speed above which the
        ball skips over it.
        """
        # FIXME: two problems. (1, probably fixed) When the next_state is less than state. (2) reward of -100 is never returned
        friction = self.computeFriction(state)
        deceleration = 5 / 7 * friction * 9.81
        u = np.sqrt(2 * deceleration * max((state - next_state), 0))
        v_min = np.sqrt(10 / 7 * friction * 9.81 * state)
        v_max = np.sqrt((2 * self.hole_size - self.ball_radius) ** 2 * (9.81 / (2 * self.ball_radius)) + v_min ** 2)
        reward = 0
        done = True
        if u < v_min:
            reward = -1
            done = False
        elif u > v_max:
            reward = -100
        return reward, done
| 33.386534
| 133
| 0.561025
|
from numbers import Number
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import math as m
from scipy.stats import norm
class MiniGolf(gym.Env):
    """1-D minigolf putting environment (comment-stripped dataset copy).

    State is the distance to the hole; actions are clipped putt speeds;
    reward is -1 per short putt and -100 past -4.
    """

    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 30
    }

    def __init__(self):
        self.min_pos = 0.0
        self.max_pos = 20.0
        self.min_action = 1e-5
        self.max_action = 10.0
        # The next four statements were fused onto one line by comment
        # stripping; tokens unchanged, line breaks restored.
        self.putter_length = 1.0
        self.friction = 0.131
        self.hole_size = 0.10
        self.sigma_noise = 0.3
        self.ball_radius = 0.02135
        self.min_variance = 1e-2  # variance floor used by the density methods
        self.viewer = None
        low = np.array([self.min_pos])
        high = np.array([self.max_pos])
        self.action_space = spaces.Box(low=self.min_action,
                                       high=self.max_action,
                                       shape=(1,), dtype=float)
        self.observation_space = spaces.Box(low=low, high=high, dtype=float)
        self.seed()
        self.reset()

    def setParams(self, env_param):
        # env_param[-1] is the noise *variance*; stored as a std-dev.
        self.putter_length = env_param[0]
        self.friction = env_param[1]
        self.hole_size = env_param[2]
        self.sigma_noise = m.sqrt(env_param[-1])

    def step(self, action, render=False):
        # One putt: rejection-sample |noise| <= 1, roll the ball, then score
        # the *previous* state.
        action = np.clip(action, self.min_action, self.max_action / 2)
        noise = 10
        while abs(noise) > 1:
            noise = self.np_random.randn() * self.sigma_noise
        u = action * self.putter_length * (1 + noise)
        deceleration = 5 / 7 * self.friction * 9.81
        t = u / deceleration
        xn = self.state - u * t + 0.5 * deceleration * t ** 2
        reward = 0
        done = True
        if self.state > 0:
            reward = -1
            done = False
        elif self.state < -4:
            reward = -100
        self.state = xn
        return self.get_state(), float(reward), done, {'state': self.get_state(), 'action': action, 'danger': float(self.state) < -4}

    def getEnvParam(self):
        # [putter_length, friction, hole_size, noise variance] for transfer.
        return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),
                           np.ravel(self.sigma_noise ** 2)])

    def reset(self, state=None):
        # Uniform restart in [min_pos, max_pos] unless a state is supplied.
        if state is None:
            self.state = np.array([self.np_random.uniform(low=self.min_pos,
                                                          high=self.max_pos)])
        else:
            self.state = np.array(state)
        return self.get_state()

    def get_state(self):
        # Copy so callers cannot mutate internal state.
        return np.array(self.state)

    def get_true_state(self):
        """For testing purposes."""
        return np.array(self.state)

    def clip_state(self, state):
        # Intentional no-op.
        return state

    def clip_action(self, action):
        # Intentional no-op.
        return action

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def getDensity_old(self, env_parameters, state, action, next_state):
        # Scalar legacy density: invert dynamics to the noise, pdf it.
        # Backward moves (state < next_state) are impossible -> 0.
        if state < next_state:
            return 0
        action = np.clip(action, self.min_action, self.max_action / 2)
        action = 1e-8 if action == 0 else action
        putter_length = env_parameters[0]
        friction = env_parameters[1]
        sigma_noise = env_parameters[-1]
        deceleration = 5 / 7 * friction * 9.81
        u = np.sqrt(2 * deceleration * (state - next_state))
        noise = (u / (action * putter_length) - 1) / sigma_noise
        return norm.pdf(noise)

    def density_old(self, env_parameters, state, action, next_state):
        # Vectorized legacy density; state NxTx1xP, action NxTxP.
        assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4
        mask = state < next_state  # impossible (backward) transitions
        action = np.clip(action, self.min_action, self.max_action / 2)
        action[action == 0] = 1e-8  # in-place; avoids division by zero
        pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))
        diff = np.abs(state - next_state)  # abs for the sqrt; masked below
        for i in range(env_parameters.shape[0]):
            deceleration = 5 / 7 * env_parameters[i, 1] * 9.81
            u = np.sqrt(2 * deceleration * diff[:, :, :, i])
            noise = (u / (action[:, :, np.newaxis, i] * env_parameters[i, 0]) - 1) / env_parameters[i, -1]
            pdf[:, :, :, i] = norm.pdf(noise) * (1 - mask[:, :, :, i])
        return pdf[:, :, 0, :]

    def densityCurrent_old(self, state, action, next_state):
        # Legacy density under the current parameters; state NxTx1, action NxT.
        assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3
        mask = state < next_state
        action = np.clip(action, self.min_action, self.max_action / 2)
        action[action == 0] = 1e-8
        diff = np.abs(state - next_state)
        deceleration = 5 / 7 * self.friction * 9.81
        u = np.sqrt(2 * deceleration * diff)
        noise = (u / (action[:, :, np.newaxis] * self.putter_length) - 1) / self.sigma_noise
        pdf = norm.pdf(noise) * (1 - mask)  # zero out impossible transitions
        return pdf[:, :, 0]

    def stepDenoisedCurrent_old(self, state, action):
        # Deterministic (noise = 0) next state.
        assert state.ndim == 3 and action.ndim == 2
        action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]
        u = action * self.putter_length
        deceleration = 5 / 7 * self.friction * 9.81
        t = u / deceleration
        return state - u * t + 0.5 * deceleration * t ** 2

    def stepDenoisedCurrent(self, state, action):
        # Mean next state: E[(1+eps)^2] = 1 + sigma^2.
        assert state.ndim == 3 and action.ndim == 2
        action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]
        u = action * self.putter_length
        deceleration = 5 / 7 * self.friction * 9.81
        return state - 0.5 * u ** 2 * (1 + self.sigma_noise ** 2) / deceleration

    def variance(self, action):
        # Next-state variance with a min_variance floor; k = noiseless distance.
        assert action.ndim == 2
        deceleration = 5 / 7 * self.friction * 9.81
        action = np.clip(action, self.min_action, self.max_action / 2)
        k = action ** 2 * self.putter_length ** 2 / (2 * deceleration)
        return 2 * k ** 2 * self.sigma_noise ** 2 * (self.sigma_noise ** 2 + 2) + self.min_variance

    def densityCurrent(self, state, action, next_state):
        # Gaussian-approximated density under current parameters.
        assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3
        mean_ns = self.stepDenoisedCurrent(state, action)
        var_ns = self.variance(action)
        return norm.pdf((next_state - mean_ns)[:, :, 0] / np.sqrt(var_ns))

    def density(self, env_parameters, state, action, next_state):
        # Gaussian-approximated density per parameter row; state NxTx1xP.
        assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4
        action = np.clip(action, self.min_action, self.max_action / 2)
        pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))
        for i in range(env_parameters.shape[0]):
            deceleration = 5 / 7 * env_parameters[i, 1] * 9.81
            k = action ** 2 * env_parameters[i, 0] ** 2 / (2 * deceleration)
            # Mean and variance of the approximate next-state distribution.
            mean_ns = state[:, :, :, i] - k[:, :, np.newaxis, i] * (1 + env_parameters[i, -1])
            var_ns = 2 * k[:, :, np.newaxis, i] ** 2 * env_parameters[i, -1] * (
                    env_parameters[i, -1] + 2) + self.min_variance
            pdf[:, :, :, i] = norm.pdf((next_state[:, :, :, i] - mean_ns) / np.sqrt(var_ns))
        return pdf[:, :, 0, :]
class ComplexMiniGolf(gym.Env):
    """Minigolf with position-dependent friction (comment-stripped dataset copy).

    NOTE(review): ``self.friction`` is never set in __init__ but is read by
    getEnvParam(); confirm setParams() is always called first.
    """

    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 30
    }

    def __init__(self):
        self.horizon = 20
        self.gamma = 0.99
        self.min_pos = 0.0
        self.max_pos = 20.0
        self.min_action = 1e-5
        self.max_action = 10.0
        # The next five statements were fused onto two lines by comment
        # stripping; tokens unchanged, line breaks restored.
        self.putter_length = 1.0
        self.friction_low = 0.131
        self.friction_high = 0.19
        self.hole_size = 0.10
        self.sigma_noise = 0.3
        self.ball_radius = 0.02135
        self.min_variance = 1e-2  # variance floor used by density computations
        self.viewer = None
        low = np.array([self.min_pos])
        high = np.array([self.max_pos])
        self.action_space = spaces.Box(low=self.min_action,
                                       high=self.max_action,
                                       shape=(1,))
        self.observation_space = spaces.Box(low=low, high=high)
        self.seed()
        self.reset()

    def setParams(self, env_param):
        # env_param[-1] is the noise *variance*; stored as a std-dev.
        self.putter_length = env_param[0]
        self.friction = env_param[1]
        self.hole_size = env_param[2]
        self.sigma_noise = m.sqrt(env_param[-1])

    def computeFriction(self, state):
        # Linear friction ramp from friction_low (at min_pos) to friction_high (at max_pos).
        delta_f = self.friction_high - self.friction_low
        delta_p = self.max_pos - self.min_pos
        return self.friction_low + (delta_f / delta_p) * state

    def step(self, action, render=False):
        # One putt with position-dependent friction; scores the previous state.
        action = np.clip(action, self.min_action, self.max_action / 2)
        noise = 10
        while abs(noise) > 1:
            noise = self.np_random.randn() * self.sigma_noise
        u = action * self.putter_length * (1 + noise)
        friction = self.computeFriction(self.state)
        deceleration = 5 / 7 * friction * 9.81
        t = u / deceleration
        xn = self.state - u * t + 0.5 * deceleration * t ** 2
        reward = 0
        done = True
        if self.state > 0:
            reward = -1
            done = False
        elif self.state < -4:
            reward = -100
        state = self.state
        self.state = xn
        return self.get_state(), float(reward), done, {"state": state, "next_state": self.state, "action": action}

    def getEnvParam(self):
        # [putter_length, friction, hole_size, noise variance] for transfer.
        return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),
                           np.ravel(self.sigma_noise ** 2)])

    def reset(self, state=None):
        # Uniform restart in [min_pos, max_pos] unless a state is supplied.
        if state is None:
            self.state = np.array([self.np_random.uniform(low=self.min_pos,
                                                          high=self.max_pos)])
        else:
            self.state = np.array(state)
        return self.get_state()

    def get_state(self):
        # Copy so callers cannot mutate internal state.
        return np.array(self.state)

    def get_true_state(self):
        """For testing purposes."""
        return np.array(self.state)

    def clip_state(self, state):
        # Intentional no-op.
        return state

    def clip_action(self, action):
        # Intentional no-op.
        return action

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reward(self, state, action, next_state):
        # Recompute (reward, done) from a transition: u is the implied initial
        # speed, v_min the speed to just reach the hole, v_max the skip-over speed.
        friction = self.computeFriction(state)
        deceleration = 5 / 7 * friction * 9.81
        u = np.sqrt(2 * deceleration * max((state - next_state), 0))
        v_min = np.sqrt(10 / 7 * friction * 9.81 * state)
        v_max = np.sqrt((2 * self.hole_size - self.ball_radius) ** 2 * (9.81 / (2 * self.ball_radius)) + v_min ** 2)
        reward = 0
        done = True
        if u < v_min:
            reward = -1
            done = False
        elif u > v_max:
            reward = -100
        return reward, done
| true
| true
|
f7045efad70ff6e1be66e1bccf1a06a420b019bb
| 636
|
py
|
Python
|
Pyduino/Boards/Uno.py
|
ItzTheDodo/Pyduino
|
a68d6a3214d5fb452e8b8e53cb013ee7205734bb
|
[
"Apache-2.0"
] | null | null | null |
Pyduino/Boards/Uno.py
|
ItzTheDodo/Pyduino
|
a68d6a3214d5fb452e8b8e53cb013ee7205734bb
|
[
"Apache-2.0"
] | null | null | null |
Pyduino/Boards/Uno.py
|
ItzTheDodo/Pyduino
|
a68d6a3214d5fb452e8b8e53cb013ee7205734bb
|
[
"Apache-2.0"
] | null | null | null |
class UnoInfo:
    """Static hardware facts for an Arduino Uno: pin counts, power rails, serial slots."""

    def __init__(self):
        # Pin-group counts: digital data pins, analog inputs, ground pins.
        self.dataPins, self.analogInPins, self.GND = 13, 5, 3
        # Supply voltages (volts) and the hardware-serial pin numbers.
        self.pow = [3.3, 5]
        self.TX, self.RX = 1, 0

    def getMainInfo(self):
        """Headline specs keyed by stringified column index."""
        summary = {"0": self.dataPins, "1": self.GND, "2": self.pow}
        return summary

    def getDigitalPins(self):
        """Number of digital data pins."""
        return self.dataPins

    def getAnalogPins(self):
        """Number of analog input pins."""
        return self.analogInPins

    def getAmountGND(self):
        """Number of ground pins."""
        return self.GND

    def getPowOut(self):
        """Available supply voltages in volts."""
        return self.pow

    def getTXSlot(self):
        """Hardware-serial transmit pin number."""
        return self.TX

    def getRXSlot(self):
        """Hardware-serial receive pin number."""
        return self.RX
| 19.875
| 66
| 0.536164
|
class UnoInfo:
    """Describes the fixed pin layout and power outputs of an Arduino Uno."""

    def __init__(self):
        # Digital data pin count.
        self.dataPins = 13
        # Analog input pin count.
        self.analogInPins = 5
        # Ground pin count.
        self.GND = 3
        # Supply voltages (V) exposed on the power header.
        self.pow = [3.3, 5]
        # Hardware-serial transmit/receive pin numbers.
        self.TX = 1
        self.RX = 0

    def getMainInfo(self):
        """Return the key specs as an index-keyed dict."""
        return dict((("0", self.dataPins), ("1", self.GND), ("2", self.pow)))

    def getDigitalPins(self):
        """Digital data pin count."""
        return self.dataPins

    def getAnalogPins(self):
        """Analog input pin count."""
        return self.analogInPins

    def getAmountGND(self):
        """Ground pin count."""
        return self.GND

    def getPowOut(self):
        """Supply voltages in volts."""
        return self.pow

    def getTXSlot(self):
        """TX pin number."""
        return self.TX

    def getRXSlot(self):
        """RX pin number."""
        return self.RX
| true
| true
|
f7046024f61186b826309ccf12e7b065fb9976cb
| 952
|
py
|
Python
|
Python/Simple-Sender-Receiver/receiver-tls.py
|
nplab/IOT-Project
|
b0c1f2b5f4c130ef4e4933801da8792a95609fb4
|
[
"BSD-3-Clause"
] | null | null | null |
Python/Simple-Sender-Receiver/receiver-tls.py
|
nplab/IOT-Project
|
b0c1f2b5f4c130ef4e4933801da8792a95609fb4
|
[
"BSD-3-Clause"
] | null | null | null |
Python/Simple-Sender-Receiver/receiver-tls.py
|
nplab/IOT-Project
|
b0c1f2b5f4c130ef4e4933801da8792a95609fb4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""Minimal MQTT-over-TLS receiver: subscribe to one sensor topic and print every message."""
import paho.mqtt.client as mqtt
import json
import random
import math
import time
import ssl

config_mqtt_broker_ip = "iot.fh-muenster.de"
# Random suffix keeps concurrently running receivers from colliding on the broker.
config_mqtt_client_id = "dummy-receiver-" + str(random.randint(1000, 9999));
config_mqtt_topic = "sensor/60:01:94:4A:AF:7A"

# NOTE(review): set once at startup and never updated or read afterwards.
ts_last_message = int(round(time.time() * 1000))

# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))
    # (Re)subscribing here means the subscription is restored after reconnects.
    client.subscribe(config_mqtt_topic)

# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    print(msg.topic + " " + str(msg.payload))

mqtt_c = mqtt.Client(config_mqtt_client_id)
mqtt_c.on_connect = on_connect
mqtt_c.on_message = on_message
# Verify the broker's certificate against the bundled CA; hostname checking stays enabled.
mqtt_c.tls_set(ca_certs="ca.pem")
#mqtt_c.tls_insecure_set(True)
mqtt_c.connect(config_mqtt_broker_ip, 8883, 60)
# Block forever, dispatching network traffic to the callbacks above.
mqtt_c.loop_forever();
| 27.2
| 79
| 0.767857
|
"""MQTT-over-TLS receiver (comment-stripped dataset copy): print every message on one sensor topic."""
import paho.mqtt.client as mqtt
import json
import random
import math
import time
import ssl

config_mqtt_broker_ip = "iot.fh-muenster.de"
# Random suffix avoids client-id collisions between concurrent receivers.
config_mqtt_client_id = "dummy-receiver-" + str(random.randint(1000, 9999));
config_mqtt_topic = "sensor/60:01:94:4A:AF:7A"

# NOTE(review): set once at startup and never updated or read afterwards.
ts_last_message = int(round(time.time() * 1000))

def on_connect(client, userdata, flags, rc):
    # CONNACK callback: (re)subscribe so the subscription survives reconnects.
    print("Connected with result code " + str(rc))
    client.subscribe(config_mqtt_topic)

def on_message(client, userdata, msg):
    # PUBLISH callback: dump topic and raw payload.
    print(msg.topic + " " + str(msg.payload))

mqtt_c = mqtt.Client(config_mqtt_client_id)
mqtt_c.on_connect = on_connect
mqtt_c.on_message = on_message
# Verify the broker's certificate against the bundled CA file.
mqtt_c.tls_set(ca_certs="ca.pem")
mqtt_c.connect(config_mqtt_broker_ip, 8883, 60)
# Block forever, dispatching network traffic to the callbacks above.
mqtt_c.loop_forever();
| true
| true
|
f704617f9290ee2bf253dd9c32d76dbfa0b5aedf
| 5,785
|
py
|
Python
|
WebScraping2.py
|
jenildesai25/WebScrapping
|
41937094a7963d53ab09e3ceff055dca4a95f13f
|
[
"MIT"
] | null | null | null |
WebScraping2.py
|
jenildesai25/WebScrapping
|
41937094a7963d53ab09e3ceff055dca4a95f13f
|
[
"MIT"
] | null | null | null |
WebScraping2.py
|
jenildesai25/WebScrapping
|
41937094a7963d53ab09e3ceff055dca4a95f13f
|
[
"MIT"
] | null | null | null |
# Online References used :
# https://github.com/imadmali/movie-scraper/blob/master/MojoLinkExtract.py
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# https://nycdatascience.com/blog/student-works/scraping-box-office-mojo/
# https://www.youtube.com/watch?v=XQgXKtPSzUI
# https://www.youtube.com/watch?v=aIPqt-OdmS0
# https://www.youtube.com/watch?v=XQgXKtPSzUI
from bs4 import BeautifulSoup
import pandas as pd
import os
import requests
import glob
import re
def scrape_data_for_actors():
    """Scrape Box Office Mojo's actor ranking pages into actors.csv/actors.txt.

    Walks every page of the "People > Actor" listing, collects each page's
    ranking table (rank, person, total gross, number of movies, average
    gross, number-one pictures, plus the Mojo person id extracted from the
    row links), appends it to ``actors.csv``, merges everything into a
    tab-separated ``actors.txt``, and prints the actor with the highest
    average earnings per movie and the actor with the most movies.

    Side effects: creates (and first wipes) Desktop/BoxOfficeMojo2_virti_bipin
    under the current user's profile (Windows-only: relies on the
    USERPROFILE environment variable), changes the process working
    directory into it, performs network requests, and writes files there.
    """
    # Build and enter the output folder on the user's Desktop.
    file_path = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
    file_path = os.path.join(file_path, 'BoxOfficeMojo2_virti_bipin')
    if not os.path.exists(file_path):
        os.mkdir(file_path)
    os.chdir(file_path)

    # Start from a clean slate so the append-mode CSV writes below do not
    # accumulate stale rows from a previous run.
    for stale_file in glob.glob("*"):
        os.remove(stale_file)

    # Discover how many listing pages exist by collecting the distinct
    # "page..." fragments from the pagination hrefs on page 1.
    url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum=1&sort=sumgross&order=DESC&&p=.htm'
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    pages_data = [link['href']
                  for link in soup.find_all('a', href=lambda href: href and "page" in href)]
    total_pages = []
    for page in pages_data:
        if 'page' in page:
            index = page.find('page')
            # e.g. the "page=2" chunk of the href; dedupe to count pages.
            if page[index:index + 10] not in total_pages:
                total_pages.append(page[index:index + 10])

    for num in range(1, len(total_pages) + 1):
        try:
            url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum={}&sort=sumgross&order=DESC&&p=.htm'.format(num)
            print("Page number {}".format(num))
            response_from_url = requests.get(url)
            html = response_from_url.text
            # lxml is fast and tolerant of Mojo's markup quirks.
            soup = BeautifulSoup(html, 'lxml')
            table = soup.find('table', {"cellspacing": "1"})
            # First row is the header; skip it and name the columns ourselves.
            df = pd.read_html(str(table), skiprows=1)
            df = df[0]
            df = df.iloc[:, :6]  # slice off the trailing date columns
            df.columns = ['rank', 'person', 'total gross', 'number of movies',
                          'Average', 'number 1 picture']
            df['id'] = ''
            id_list = []
            title_list = df['rank'].tolist()
            df.index = [i for i in range(1, len(title_list) + 1)]
            # Mojo person ids live in "?id=<name>.htm" hrefs.  Raw string
            # avoids the invalid "\?" escape the original pattern produced.
            for link in soup.findAll('a', {'href': re.compile(r"\?id=")}):
                id_list.append(link.get('href'))
            id_list = [x.split('=')[1] for x in id_list]
            id_list = [x.split('.')[0] for x in id_list]
            # Drop the first match — it appears not to be a ranking row
            # (mirrors the original scrape logic; confirm against the page).
            id_list = id_list[1:]
            id_dict = dict(zip(title_list, id_list))
            for index in df.index:
                df.loc[index, 'id'] = id_dict[df.loc[index, 'rank']]
            df.to_csv("actors.csv", index=False, mode='a')  # append this page
        except Exception as e:
            # Best effort: one malformed page should not abort the crawl.
            print(e)
            continue

    # Merge the per-page CSV output into a single tab-separated actors.txt.
    df_container = [pd.read_csv(csv_file) for csv_file in glob.glob("*.csv")]
    df_combined = pd.concat(df_container)
    df_combined.to_csv("actors.txt", index=False, sep="\t")

    # Question 1: actor with the highest average earnings per movie.
    df = pd.read_csv("actors.txt", sep="\t")
    df['Average'] = df['Average'].apply(lambda x: x.replace('$', ''))  # strip dollar signs
    df['Average'] = df['Average'].apply(lambda x: x.replace(',', ''))  # strip thousands separators
    df['Average'] = pd.to_numeric(df['Average'], errors='coerce')
    df = df.sort_values(by='Average', ascending=False)
    actor_with_highest_average_earning = df.iloc[0]['person']
    print("actor(s) with the highest average earnings per movie is {}".format(actor_with_highest_average_earning))

    # Question 2: actor with the most movies.
    new_df = pd.read_csv("actors.txt", sep="\t")
    new_df['number of movies'] = pd.to_numeric(new_df['number of movies'], errors='coerce')
    actor_most_movies = new_df.loc[new_df['number of movies'].idxmax()].person
    print("actor(s) with the maximum number of movies is {}".format(actor_most_movies))
# Entry point: run the scraper only when executed as a script.
if __name__ == '__main__':
    scrape_data_for_actors()
| 46.28
| 197
| 0.584788
|
from bs4 import BeautifulSoup
import pandas as pd
import os
import requests
import glob
import re
def scrape_data_for_actors():
    file_path = os.path.join(os.path.join(os.environ['USERPROFILE']),
                             'Desktop')
    file_path = os.path.join(file_path,
                             'BoxOfficeMojo2_virti_bipin')
if not os.path.exists(str(file_path)):
os.mkdir(str(file_path)) # If path does not exist create the path
os.chdir(file_path) # Change the directory of the file path
if len(glob.glob(
"*")) != 0: # The glob module finds all the pathnames matching a specified pattern according to the rules used by the Unix shell
file_list = glob.glob("*")
for file in file_list:
os.remove(file)
# The url of the BoxOffice Mojo to be scraped
url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum=1&sort=sumgross&order=DESC&&p=.htm'
pages_data = [] # List to store the pages data
total_pages = []
response = requests.get(url) # Get the response of the url after passing the user input
soup = BeautifulSoup(response.content,
'html.parser') # Using the beautiful soup library to parse the html content and format it
for page in soup.find_all('a', href=lambda href: href and "page" in href): # find the href in a tags
pages_data.append(page['href']) # append the data in the pages_data list
for page in pages_data:
if 'page' in page: # If "page" found in href
index = page.find('page') # Take the index of that page if found
# print("Index", index)
if page[index:index + 10] not in total_pages:
# For extracting the total number of pages
total_pages.append(page[
index:index + 10]) # for example : page=2 so in order to get the total number of pages and iterate through it it goes from 1 till end of pages for pagination
# print("Total Pages", total_pages)
average_gross_list = []
for num in range(1, len(total_pages) + 1, 1):
try:
url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum={}&sort=sumgross&order=DESC&&p=.htm'.format(num) # This one works well
# Get the Response
print("Page number {}".format(num))
response_from_url = requests.get(url)
html = response_from_url.text
soup = BeautifulSoup(html,
'lxml') # lxml is a pretty extensive library written for parsing XML and HTML documents very quickly
table = soup.find('table', {"cellspacing": "1"})
# Using dataframes
df = pd.read_html(str(table),skiprows=1)
df = df[0]
df = df.iloc[:, :6] # This is used to slice the dataframe to cut off the date sections.
df.columns = ['rank', 'person', 'total gross', 'number of movies', 'Average', 'number 1 picture']
df['id'] = ''
id_list = []
title_list = df['rank'].tolist()
new_index = [i for i in range(1,len(title_list)+1)]
df.index = new_index
for link in soup.findAll('a', {'href': re.compile("\?id=")}):
id_list.append(link.get('href'))
id_list = [x.split('=')[1] for x in id_list]
id_list = [x.split('.')[0] for x in id_list]
id_list = id_list[1:]
id_dict = dict(zip(title_list, id_list))
for index in df.index:
df.loc[index, 'id'] = id_dict[df.loc[index, 'rank']]
df.to_csv("actors.csv", index=False, mode='a')
except Exception as e:
print(e)
continue
file_list = glob.glob("*.csv")
df_container = []
for file in file_list:
df = pd.read_csv(file)
df_container.append(df)
df_combined = pd.concat(df_container)
df_combined.to_csv("actors.txt", index=False, sep="\t")
df = pd.read_csv("actors.txt", sep="\t")
# Data Cleaning
df['Average'] = df['Average'].apply(lambda x: x.replace('$', '')) # replace dollar signs
df['Average'] = df['Average'].apply(lambda x: x.replace(',', '')) # replace commas
df['Average'] = pd.to_numeric(df['Average'], errors='coerce')
df = df.sort_values(by='Average', ascending=False)
actor_with_highest_average_earning = df.iloc[0]['person']
print("actor(s) with the highest average earnings per movie is {}".format(actor_with_highest_average_earning))
new_df = pd.read_csv("actors.txt", sep="\t")
new_df['number of movies'] = pd.to_numeric(new_df['number of movies'], errors='coerce')
actor_most_movies = new_df.loc[new_df['number of movies'].idxmax()].person
print("actor(s) with the maximum number of movies is {}".format(actor_most_movies))
if __name__ == '__main__':
scrape_data_for_actors()
| true
| true
|
f70461a1b7fa5f8f95a75d2f5d58265ffdffea63
| 4,593
|
py
|
Python
|
var/spack/repos/builtin/packages/py-pyqt5/package.py
|
fcannini/spack
|
9b3f5f3890025494ffa620d144d22a4734c8fcee
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-pyqt5/package.py
|
fcannini/spack
|
9b3f5f3890025494ffa620d144d22a4734c8fcee
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-pyqt5/package.py
|
fcannini/spack
|
9b3f5f3890025494ffa620d144d22a4734c8fcee
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-03-06T11:04:37.000Z
|
2020-03-06T11:04:37.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class PyPyqt5(SIPPackage):
    """PyQt is a set of Python v2 and v3 bindings for The Qt Company's Qt
    application framework and runs on all platforms supported by Qt including
    Windows, OS X, Linux, iOS and Android. PyQt5 supports Qt v5."""
    homepage = "https://www.riverbankcomputing.com/software/pyqt/intro"
    url = "https://www.riverbankcomputing.com/static/Downloads/PyQt5/5.13.0/PyQt5_gpl-5.13.0.tar.gz"
    list_url = "https://www.riverbankcomputing.com/software/pyqt/download5"
    # Name of the private sip module PyQt5 generates/uses.
    sip_module = 'PyQt5.sip'
    # Modules imported by Spack's post-install import smoke test.
    import_modules = [
        'PyQt5', 'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtHelp',
        'PyQt5.QtMultimedia', 'PyQt5.QtMultimediaWidgets', 'PyQt5.QtNetwork',
        'PyQt5.QtOpenGL', 'PyQt5.QtPrintSupport', 'PyQt5.QtQml',
        'PyQt5.QtQuick', 'PyQt5.QtSvg', 'PyQt5.QtTest', 'PyQt5.QtWebChannel',
        'PyQt5.QtWebSockets', 'PyQt5.QtWidgets', 'PyQt5.QtXml',
        'PyQt5.QtXmlPatterns'
    ]
    version('5.13.0', sha256='0cdbffe5135926527b61cc3692dd301cd0328dd87eeaf1313e610787c46faff9')
    version('5.12.3', sha256='0db0fa37debab147450f9e052286f7a530404e2aaddc438e97a7dcdf56292110')
    variant('qsci', default=False, description='Build with QScintilla python bindings')
    # Without opengl support, I got the following error:
    # sip: QOpenGLFramebufferObject is undefined
    depends_on('qt@5:+opengl')
    depends_on('python@2.6:', type=('build', 'run'))
    depends_on('py-enum34', type=('build', 'run'), when='^python@:3.3')
    depends_on('qscintilla', when='+qsci')
    # For building Qscintilla python bindings
    # (fetched only when the matching qscintilla version is in the DAG).
    resource(name='qscintilla',
             url='https://www.riverbankcomputing.com/static/Downloads/QScintilla/2.10.2/QScintilla_gpl-2.10.2.tar.gz',
             sha256='14b31d20717eed95ea9bea4cd16e5e1b72cee7ebac647cba878e0f6db6a65ed0',
             destination='spack-resource-qscintilla',
             when='^qscintilla@2.10.2'
             )
    # https://www.riverbankcomputing.com/static/Docs/PyQt5/installation.html
    def configure_args(self):
        """Return extra arguments for PyQt5's ``configure.py``.

        Points pyuic5 at this spec's Python interpreter and installs the
        .sip files and type stubs into this package's prefix; with +qsci,
        also directs the QScintilla API files into the prefix.
        """
        args = [
            '--pyuic5-interpreter', self.spec['python'].command.path,
            '--sipdir', self.prefix.share.sip.PyQt5,
            '--stubsdir', join_path(site_packages_dir, 'PyQt5'),
        ]
        if '+qsci' in self.spec:
            args.extend(['--qsci-api-destdir', self.prefix.share.qsci])
        return args
    @run_after('install')
    def make_qsci(self):
        """Build and install the QScintilla Python bindings (+qsci only).

        Runs QScintilla's bundled ``Python/configure.py`` against this
        PyQt5 installation, patches the generated qmake project and
        Makefiles, then builds and installs into PyQt5's site-packages.
        """
        if '+qsci' in self.spec:
            # Path of the QScintilla 'Python' subdir staged by resource().
            rsrc_py_path = os.path.join(
                self.stage.source_path,
                'spack-resource-qscintilla/QScintilla_gpl-' +
                str(self.spec['qscintilla'].version), 'Python')
            with working_dir(rsrc_py_path):
                pydir = join_path(site_packages_dir, 'PyQt5')
                python = self.spec['python'].command
                python('configure.py', '--pyqt=PyQt5',
                       '--sip=' + self.prefix.bin.sip,
                       '--qsci-incdir=' +
                       self.spec['qscintilla'].prefix.include,
                       '--qsci-libdir=' + self.spec['qscintilla'].prefix.lib,
                       '--qsci-sipdir=' + self.prefix.share.sip.PyQt5,
                       '--apidir=' + self.prefix.share.qsci,
                       '--destdir=' + pydir,
                       '--pyqt-sipdir=' + self.prefix.share.sip.PyQt5,
                       '--sip-incdir=' + python_include_dir,
                       '--stubsdir=' + pydir)
                # Fix build errors
                # "QAbstractScrollArea: No such file or directory"
                # "qprinter.h: No such file or directory"
                # ".../Qsci.so: undefined symbol: _ZTI10Qsci...."
                qscipro = FileFilter('Qsci/Qsci.pro')
                link_qscilibs = 'LIBS += -L' + self.prefix.lib +\
                    ' -lqscintilla2_qt5'
                qscipro.filter('TEMPLATE = lib',
                               'TEMPLATE = lib\nQT += widgets' +
                               '\nQT += printsupport\n' + link_qscilibs)
                make()
                # Fix installation prefixes
                # (strip $(INSTALL_ROOT) so files land in the real prefix).
                makefile = FileFilter('Makefile')
                makefile.filter(r'\$\(INSTALL_ROOT\)', '')
                makefile = FileFilter('Qsci/Makefile')
                makefile.filter(r'\$\(INSTALL_ROOT\)', '')
                make('install')
| 44.592233
| 118
| 0.588287
|
from spack import *
import os
class PyPyqt5(SIPPackage):
homepage = "https://www.riverbankcomputing.com/software/pyqt/intro"
url = "https://www.riverbankcomputing.com/static/Downloads/PyQt5/5.13.0/PyQt5_gpl-5.13.0.tar.gz"
list_url = "https://www.riverbankcomputing.com/software/pyqt/download5"
sip_module = 'PyQt5.sip'
import_modules = [
'PyQt5', 'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtHelp',
'PyQt5.QtMultimedia', 'PyQt5.QtMultimediaWidgets', 'PyQt5.QtNetwork',
'PyQt5.QtOpenGL', 'PyQt5.QtPrintSupport', 'PyQt5.QtQml',
'PyQt5.QtQuick', 'PyQt5.QtSvg', 'PyQt5.QtTest', 'PyQt5.QtWebChannel',
'PyQt5.QtWebSockets', 'PyQt5.QtWidgets', 'PyQt5.QtXml',
'PyQt5.QtXmlPatterns'
]
version('5.13.0', sha256='0cdbffe5135926527b61cc3692dd301cd0328dd87eeaf1313e610787c46faff9')
version('5.12.3', sha256='0db0fa37debab147450f9e052286f7a530404e2aaddc438e97a7dcdf56292110')
variant('qsci', default=False, description='Build with QScintilla python bindings')
depends_on('qt@5:+opengl')
depends_on('python@2.6:', type=('build', 'run'))
depends_on('py-enum34', type=('build', 'run'), when='^python@:3.3')
depends_on('qscintilla', when='+qsci')
resource(name='qscintilla',
url='https://www.riverbankcomputing.com/static/Downloads/QScintilla/2.10.2/QScintilla_gpl-2.10.2.tar.gz',
sha256='14b31d20717eed95ea9bea4cd16e5e1b72cee7ebac647cba878e0f6db6a65ed0',
destination='spack-resource-qscintilla',
when='^qscintilla@2.10.2'
)
def configure_args(self):
args = [
'--pyuic5-interpreter', self.spec['python'].command.path,
'--sipdir', self.prefix.share.sip.PyQt5,
'--stubsdir', join_path(site_packages_dir, 'PyQt5'),
]
if '+qsci' in self.spec:
args.extend(['--qsci-api-destdir', self.prefix.share.qsci])
return args
@run_after('install')
def make_qsci(self):
if '+qsci' in self.spec:
rsrc_py_path = os.path.join(
self.stage.source_path,
'spack-resource-qscintilla/QScintilla_gpl-' +
str(self.spec['qscintilla'].version), 'Python')
with working_dir(rsrc_py_path):
pydir = join_path(site_packages_dir, 'PyQt5')
python = self.spec['python'].command
python('configure.py', '--pyqt=PyQt5',
'--sip=' + self.prefix.bin.sip,
'--qsci-incdir=' +
self.spec['qscintilla'].prefix.include,
'--qsci-libdir=' + self.spec['qscintilla'].prefix.lib,
'--qsci-sipdir=' + self.prefix.share.sip.PyQt5,
'--apidir=' + self.prefix.share.qsci,
'--destdir=' + pydir,
'--pyqt-sipdir=' + self.prefix.share.sip.PyQt5,
'--sip-incdir=' + python_include_dir,
'--stubsdir=' + pydir)
qscipro = FileFilter('Qsci/Qsci.pro')
link_qscilibs = 'LIBS += -L' + self.prefix.lib +\
' -lqscintilla2_qt5'
qscipro.filter('TEMPLATE = lib',
'TEMPLATE = lib\nQT += widgets' +
'\nQT += printsupport\n' + link_qscilibs)
make()
makefile = FileFilter('Makefile')
makefile.filter(r'\$\(INSTALL_ROOT\)', '')
makefile = FileFilter('Qsci/Makefile')
makefile.filter(r'\$\(INSTALL_ROOT\)', '')
make('install')
| true
| true
|
f70462794e04bd363c3d9166018d419774a06f8d
| 138
|
py
|
Python
|
test/fixtures.py
|
steinnes/pykubeks
|
20b52f5da2405ce8997a923d526e2e4833ce3c01
|
[
"Apache-2.0"
] | null | null | null |
test/fixtures.py
|
steinnes/pykubeks
|
20b52f5da2405ce8997a923d526e2e4833ce3c01
|
[
"Apache-2.0"
] | 2
|
2019-03-01T15:58:40.000Z
|
2019-03-04T11:07:24.000Z
|
test/fixtures.py
|
steinnes/pykubeks
|
20b52f5da2405ce8997a923d526e2e4833ce3c01
|
[
"Apache-2.0"
] | null | null | null |
# Canned ExecCredential JSON (client.authentication.k8s.io/v1alpha1) carrying
# a static bearer token — presumably used to stub the output of an exec-based
# Kubernetes auth plugin in tests; confirm against the test callers.
AUTHPLUGIN_FIXTURE = '{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1alpha1","spec":{},"status":{"token":"test"}}'
| 69
| 137
| 0.710145
|
AUTHPLUGIN_FIXTURE = '{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1alpha1","spec":{},"status":{"token":"test"}}'
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.