text
stringlengths 2
999k
|
|---|
# NOTE(review): Python 2 syntax -- the bare `print` statements below are a
# SyntaxError under Python 3 (which requires print(...)).
# Here's some new strange stuff; remember to type it exactly.
days = "Mon Tue Wed Thu Fri Sat Sun"
# "\n" embeds newlines, so each month prints on its own line.
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
print "Here are the days :",days
print "Here are the months:",months
# Triple-quoted strings may span multiple lines and are printed verbatim.
print """
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like/
Even 4 lines ifwe want, or 5, or6.
"""
|
'''
This code is released under MIT license
'''
def code_to_char(code):
    """Map an integer code in [0, 35] to its symbol: 0-9 then A-Z.

    Raises IndexError if `code` is outside the 36-symbol alphabet.
    """
    alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return alphabet[code]
def char_to_code(char):
    """Map a single character 0-9/A-Z to its integer code in [0, 35].

    Raises:
        Exception: if `char` is not exactly one character (kept for callers).
        ValueError: if `char` is not in the 36-symbol alphabet. The original
            silently returned -1 (via str.find), which would quietly corrupt
            any checksum built on top of this value.
    """
    if len(char) != 1:
        raise Exception("Char Input Has to be a Single Character")
    alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    code = alphabet.find(char)
    if code < 0:
        raise ValueError("Character %r is not in the supported alphabet" % char)
    return code
def calc_checksum_digit(input_string):
    """Return the mod-36 check character for `input_string`.

    Characters at even positions get weight 1, odd positions weight 2; each
    weighted code contributes its mod-36 remainder plus its div-36 carry.
    """
    checksum = 0
    for position, symbol in enumerate(input_string.upper()):
        weighted = ((position % 2) + 1) * char_to_code(symbol)
        checksum += weighted % 36 + weighted // 36
    return code_to_char((36 - (checksum % 36)) % 36)
def main(argv):
    """Print the checksum digit for each command-line argument."""
    for arg in argv:
        print(arg,"has checksum digit: ",calc_checksum_digit(arg))
if __name__ == "__main__":
    import sys
    main(sys.argv[1:])
|
import torch
if __name__ == "__main__":
    # Shapes (3, 1, 5) and (3, 2, 5): only the middle axis differs, and one of
    # the two sizes there is 1, so addition broadcasts along that axis.
    a = torch.arange(15).view(3, 1, 5)
    b = torch.arange(30).view(3, 2, 5)
    for label, tensor in (("a", a), ("b", b)):
        print(f"{label}:\n", tensor)
        print()
    # to trigger broadcasting mechanism, one of a or b is 1, and other axes size are equal
    print("a+b:\n", a + b)
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.optimizer import Optimizer
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma, reduce=True):
    """KL divergence between two diagonal-Gaussian policies, per action dim.

    Args:
        p0_mu, p0_sigma: mean / std of the first (old) policy.
        p1_mu, p1_sigma: mean / std of the second (new) policy.
        reduce: when True return the mean over all leading dims, otherwise the
            per-step KL (summed over the last, action, dimension).
    """
    log_sigma_ratio = torch.log(p1_sigma / p0_sigma + 1e-5)
    quad_term = (p0_sigma ** 2 + (p1_mu - p0_mu) ** 2) / (2.0 * (p1_sigma ** 2 + 1e-5))
    per_action = log_sigma_ratio + quad_term + (-1.0 / 2.0)
    # sum over action dimensions, then optionally average over all steps
    per_step = per_action.sum(dim=-1)
    return per_step.mean() if reduce else per_step
def mean_mask(input, mask, sum_mask):
    """Masked mean: sum of `input * mask` divided by the mask total.

    Fix: the body referenced an undefined name `rnn_masks` (NameError on
    every call); it now uses the `mask` parameter as intended.
    """
    return (input * mask).sum() / sum_mask
def shape_whc_to_cwh(shape):
    """Move the channel (last) axis of a 3-tuple shape to the front.

    Shapes that are not 3-tuples are returned unchanged.
    """
    if len(shape) != 3:
        return shape
    w, h, c = shape
    return (c, w, h)
def save_scheckpoint(filename, state):
    """Serialize `state` to `<filename>.pth` via torch.save.

    NOTE(review): the name's "scheckpoint" typo is kept -- it is the public
    interface and callers may rely on it.
    """
    target = filename + '.pth'
    print("=> saving checkpoint '{}'".format(target))
    torch.save(state, target)
def load_checkpoint(filename):
    """Deserialize and return a checkpoint previously written with torch.save."""
    print("=> loading checkpoint '{}'".format(filename))
    return torch.load(filename)
def parameterized_truncated_normal(uniform, mu, sigma, a, b):
    """Transform uniform [0, 1) samples into N(mu, sigma^2) truncated to [a, b].

    Uses the inverse-CDF method: map the uniform draws onto the CDF mass that
    lies between the truncation bounds, then invert via erfinv.
    """
    std_normal = torch.distributions.normal.Normal(0, 1)
    lo = (a - mu) / sigma
    hi = (b - mu) / sigma
    cdf_lo = std_normal.cdf(torch.from_numpy(np.array(lo)))
    cdf_hi = std_normal.cdf(torch.from_numpy(np.array(hi)))
    p = (cdf_lo + (cdf_hi - cdf_lo) * uniform).numpy()
    one = np.array(1, dtype=p.dtype)
    epsilon = np.array(np.finfo(p.dtype).eps, dtype=p.dtype)
    # keep 2p-1 strictly inside (-1, 1) so erfinv stays finite
    v = np.clip(2 * p - 1, -one + epsilon, one - epsilon)
    samples = mu + sigma * np.sqrt(2) * torch.erfinv(torch.from_numpy(v))
    return torch.clamp(samples, a, b)
def truncated_normal(uniform, mu=0.0, sigma=1.0, a=-2, b=2):
    # Thin wrapper supplying the conventional +/-2-sigma truncation bounds.
    return parameterized_truncated_normal(uniform, mu, sigma, a, b)
def sample_truncated_normal(shape=(), mu=0.0, sigma=1.0, a=-2, b=2):
    # Draw `shape`-many truncated-normal samples by feeding fresh uniforms
    # through the inverse-CDF transform above.
    return truncated_normal(torch.from_numpy(np.random.uniform(0, 1, shape)), mu, sigma, a, b)
def variance_scaling_initializer(tensor, mode='fan_in', scale=2.0):
    """Fill `tensor` in-place with truncated-normal values, std sqrt(scale/fan).

    Args:
        tensor: the parameter tensor to initialize (modified in-place).
        mode: 'fan_in' or 'fan_out', forwarded to the fan computation.
        scale: variance scale; 2.0 corresponds to He-style initialization.

    Fix: removed a stray debug `print(fan, scale)` that spammed stdout on
    every call.
    """
    # NOTE(review): _calculate_correct_fan is a private torch API -- confirm
    # it is still available on the pinned torch version.
    fan = torch.nn.init._calculate_correct_fan(tensor, mode)
    sigma = np.sqrt(scale / fan)
    with torch.no_grad():
        tensor[:] = sample_truncated_normal(tensor.size(), sigma=sigma)
    return tensor
def random_sample(obs_batch, prob):
    """Return a random fraction `prob` of the rows of `obs_batch`.

    Rows are drawn without replacement via a random permutation; the number
    kept is int(prob * batch_size).
    """
    batch_size = obs_batch.size()[0]
    keep = int(prob * batch_size)
    chosen = torch.randperm(batch_size, device=obs_batch.device)[:keep]
    return torch.index_select(obs_batch, 0, chosen)
def mean_list(val):
    """Mean of a list of (broadcast-compatible) tensors, stacked then averaged."""
    stacked = torch.stack(val)
    return torch.mean(stacked)
def apply_masks(losses, mask=None):
    """Reduce each loss tensor to a scalar, optionally weighted by a mask.

    Returns:
        (reduced_losses, sum_mask): sum_mask is None when no mask was given,
        otherwise the scalar total of the (unsqueezed) mask.
    """
    if mask is None:
        return [torch.mean(loss) for loss in losses], None
    mask = mask.unsqueeze(1)
    sum_mask = mask.sum()
    reduced = [(loss * mask).sum() / sum_mask for loss in losses]
    return reduced, sum_mask
def normalization_with_masks(values, masks):
    """Normalize `values` to zero mean / unit std over the masked entries.

    The std uses Bessel's correction (divides by sum_mask - 1); a 1e-8 term
    guards against division by zero.
    """
    sum_mask = masks.sum()
    masked = values * masks
    mean = masked.sum() / sum_mask
    # biased variance E[x^2] - E[x]^2 over the mask, then Bessel-corrected
    biased_var = ((masked ** 2) / sum_mask).sum() - ((masked / sum_mask).sum()) ** 2
    std = torch.sqrt(biased_var * sum_mask / (sum_mask - 1))
    return (masked - mean) / (std + 1e-8)
class CoordConv2d(nn.Conv2d):
    """Conv2d that concatenates two normalized coordinate channels to its input.

    The coordinate grids are cached per (batch, H, W, dtype) so they are only
    built once per distinct input geometry.
    """
    # Cache of coordinate grids keyed by (batch, H, W, dtype-string).
    pool = {}

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # Two extra input channels carry the (x, y) coordinate maps.
        super().__init__(in_channels + 2, out_channels, kernel_size, stride,
                         padding, dilation, groups, bias)

    @staticmethod
    def get_coord(x):
        """Return (and cache) an (N, 2, H, W) coordinate grid matching `x`."""
        cache_key = (int(x.size(0)), int(x.size(2)), int(x.size(3)), x.type())
        cached = CoordConv2d.pool.get(cache_key)
        if cached is None:
            identity = torch.Tensor([[[1, 0, 0], [0, 1, 0]]])
            grid = torch.nn.functional.affine_grid(
                identity, torch.Size([1, 1, x.size(2), x.size(3)]))
            cached = grid.permute([0, 3, 1, 2]).repeat(
                x.size(0), 1, 1, 1).type_as(x)
            CoordConv2d.pool[cache_key] = cached
        return cached

    def forward(self, x):
        augmented = torch.cat([x, self.get_coord(x).type_as(x)], 1)
        return torch.nn.functional.conv2d(augmented, self.weight, self.bias,
                                          self.stride, self.padding,
                                          self.dilation, self.groups)
class LayerNorm2d(nn.Module):
    """
    Layer norm that just works on the channel axis for a Conv2d
    Ref:
    - code modified from https://github.com/Scitator/Run-Skeleton-Run/blob/master/common/modules/LayerNorm.py
    - paper: https://arxiv.org/abs/1607.06450
    Usage:
        ln = LayerNormConv(3)
        x = Variable(torch.rand((1,3,4,2)))
        ln(x).size()
    """
    def __init__(self, features, eps=1e-6):
        super().__init__()
        # gamma (scale) starts at 1; beta (shift) must start at 0.
        # Fix: beta was initialized with torch.ones, which offset every
        # normalized output by a constant +1 (standard LayerNorm inits the
        # bias term to zero).
        self.register_buffer("gamma", torch.ones(features).unsqueeze(-1).unsqueeze(-1))
        self.register_buffer("beta", torch.zeros(features).unsqueeze(-1).unsqueeze(-1))
        self.eps = eps
        self.features = features

    def _check_input_dim(self, input):
        # Channel count of the input must match the registered buffers.
        if input.size(1) != self.gamma.nelement():
            raise ValueError('got {}-feature tensor, expected {}'
                             .format(input.size(1), self.features))

    def forward(self, x):
        self._check_input_dim(x)
        # Flatten to (N*H*W, C) and compute per-channel statistics.
        x_flat = x.transpose(1, -1).contiguous().view((-1, x.size(1)))
        mean = x_flat.mean(0).unsqueeze(-1).unsqueeze(-1).expand_as(x)
        std = x_flat.std(0).unsqueeze(-1).unsqueeze(-1).expand_as(x)
        return self.gamma.expand_as(x) * (x - mean) / (std + self.eps) + self.beta.expand_as(x)
class DiscreteActionsEncoder(nn.Module):
    """Encode per-agent discrete actions into a single feature vector.

    Actions are either embedded (learned Embedding table) or one-hot encoded,
    then flattened across agents and projected with a Linear layer.
    """
    def __init__(self, actions_max, mlp_out, emb_size, num_agents, use_embedding):
        super().__init__()
        self.actions_max = actions_max
        self.num_agents = num_agents
        self.use_embedding = use_embedding
        # Without an embedding table, the per-action feature is a one-hot of
        # width actions_max.
        self.emb_size = emb_size if use_embedding else actions_max
        if use_embedding:
            self.embedding = torch.nn.Embedding(actions_max, emb_size)
        self.linear = torch.nn.Linear(self.emb_size * num_agents, mlp_out)

    def forward(self, discrete_actions):
        if self.use_embedding:
            features = self.embedding(discrete_actions)
        else:
            features = torch.nn.functional.one_hot(
                discrete_actions, num_classes=self.actions_max)
        flat = features.view(-1, self.emb_size * self.num_agents).float()
        return self.linear(flat)
def get_model_gradients(model):
    """Collect `.grad` from every parameter (entries are None before backward)."""
    return [param.grad for param in model.parameters()]
def get_mean(v):
    """np.mean of `v`, or 0 for an empty sequence (np.mean would warn/NaN)."""
    return np.mean(v) if len(v) > 0 else 0
class CategoricalMasked2(torch.distributions.Categorical):
    """Categorical distribution whose logits can be masked via log(mask).

    Fix: the super() calls named the *sibling* class CategoricalMasked, so
    `super(CategoricalMasked, self).__init__(...)` raised
    `TypeError: super(type, obj): obj must be an instance ...` on every
    construction. Zero-argument super() is used instead.
    """
    def __init__(self, probs=None, logits=None, validate_args=None, masks=None):
        self.masks = masks
        if self.masks is not None:
            # log(0) = -inf drives masked-out actions' probability to zero.
            logits = logits + torch.log(masks.float())
        super().__init__(probs, logits, validate_args)

    def entropy(self):
        if self.masks is None:
            return super().entropy()
        p_log_p = self.logits * self.probs
        # masked actions yield (-inf) * 0 = NaN; zero those terms out
        p_log_p[p_log_p != p_log_p] = 0
        return -p_log_p.sum(-1)
class CategoricalMasked(torch.distributions.Categorical):
    """Categorical over logits where masked-out actions get logit -1e8."""
    def __init__(self, probs=None, logits=None, validate_args=None, masks=None):
        self.masks = masks
        if masks is not None:
            self.device = self.masks.device
            # effectively -inf: masked actions receive ~zero probability
            blocked = torch.tensor(-1e+8).to(self.device)
            logits = torch.where(self.masks, logits, blocked)
        super(CategoricalMasked, self).__init__(probs, logits, validate_args)

    def entropy(self):
        if self.masks is None:
            return super(CategoricalMasked, self).entropy()
        zero = torch.tensor(0.0).to(self.device)
        # drop masked actions' contribution entirely
        plogp = torch.where(self.masks, self.logits * self.probs, zero)
        return -plogp.sum(-1)
class AverageMeter(nn.Module):
    """Running mean over (up to) the `max_size` most recently seen samples.

    Stored as an nn.Module buffer so the mean travels with state_dict/device
    moves.
    """
    def __init__(self, in_shape, max_size):
        super(AverageMeter, self).__init__()
        self.max_size = max_size
        self.current_size = 0
        self.register_buffer("mean", torch.zeros(in_shape, dtype=torch.float32))

    def update(self, values):
        """Fold a batch of values (first dim = batch) into the running mean."""
        batch = values.size()[0]
        if batch == 0:
            return
        batch_mean = torch.mean(values.float(), dim=0)
        batch = np.clip(batch, 0, self.max_size)
        # shrink the weight of history so the window never exceeds max_size
        retained = min(self.max_size - batch, self.current_size)
        total = retained + batch
        self.current_size = total
        self.mean = (self.mean * retained + batch_mean * batch) / total

    def clear(self):
        """Reset the meter to its initial empty state."""
        self.current_size = 0
        self.mean.fill_(0)

    def __len__(self):
        return self.current_size

    def get_mean(self):
        """Return the current mean as a numpy array (leading axis squeezed)."""
        return self.mean.squeeze(0).cpu().numpy()
|
from unittest.mock import patch, Mock
from django.test import TestCase, override_settings
from django.core.management import call_command
from solo_rog_api.management.commands import populate_fake
from solo_rog_api.models import (
User,
Address,
Part,
SuppAdd,
SubInventory,
Locator,
Document,
Status,
)
class CreateFakeDataTestCase(TestCase):
    """Tests for the populate_fake management command and its helper functions."""
    # Dotted module path used to build the @patch targets below.
    import_root = "solo_rog_api.management.commands.populate_fake"

    def test_creates_fake_users(self) -> None:
        # NOTE(review): count stays at 2 after pre-creating one user, which
        # implies create_fake_users replaces existing users -- confirm.
        User.objects.create(username="deleteme")
        self.assertEqual(User.objects.count(), 1)
        populate_fake.create_fake_users(2)
        self.assertEqual(User.objects.count(), 2)

    def test_creates_fake_parts(self) -> None:
        populate_fake.create_fake_parts(2)
        self.assertEqual(Part.objects.count(), 2)

    def test_creates_fake_suppadds(self) -> None:
        populate_fake.create_fake_suppadds(1)
        self.assertEqual(SuppAdd.objects.count(), 1)

    @patch("solo_rog_api.management.commands.populate_fake.random.randint")
    def test_creates_fake_subinventories(self, randint_mock: Mock) -> None:
        # randint is mocked so the number of generated child rows is fixed.
        randint_mock.return_value = 3
        SuppAdd.objects.create(code="testcode", desc="testdesc")
        populate_fake.create_fake_subinventorys(3)
        self.assertEqual(SubInventory.objects.count(), 3)
        self.assertEqual(
            SubInventory.objects.filter(
                suppadd__code="testcode", suppadd__desc="testdesc"
            ).count(),
            3,
        )

    @patch("solo_rog_api.management.commands.populate_fake.random.randint")
    def test_creates_fake_locators(self, randint_mock: Mock) -> None:
        randint_mock.return_value = 3
        SubInventory.objects.create(code="testcode", desc="testdesc")
        populate_fake.create_fake_locators()
        self.assertEqual(Locator.objects.count(), 3)
        self.assertEqual(
            Locator.objects.filter(subinventorys__code="testcode").count(), 3
        )

    def test_creates_fake_documents(self) -> None:
        # Documents require pre-existing suppadds, addresses and parts.
        SuppAdd.objects.create(code="testcode", desc="testdesc")
        populate_fake.create_fake_addresses(2)
        Part.objects.create(nsn="testnsn")
        populate_fake.create_fake_documents(1)
        self.assertEqual(Document.objects.count(), 1)
        self.assertTrue(
            Document.objects.filter(
                part__nsn="testnsn",
                suppadd__code="testcode",
                sdn__isnull=False,
                service_request__isnull=False,
            ).exists()
        )

    @patch("solo_rog_api.management.commands.populate_fake.random.randint")
    def test_creates_fake_statuses_ae1(self, randint_mock: Mock) -> None:
        # With 1 status generated, only the first DIC in the sequence appears.
        randint_mock.return_value = 1
        Document.objects.create(sdn="testsdn")
        populate_fake.create_fake_statuses_for_documents()
        self.assertEqual(Status.objects.count(), 1)
        self.assertTrue(Status.objects.filter(dic="AE1").exists())

    @patch("solo_rog_api.management.commands.populate_fake.random.randint")
    def test_creates_fake_statuses_as2(self, randint_mock: Mock) -> None:
        randint_mock.return_value = 3
        doc = Document.objects.create(sdn="testsdn")
        populate_fake.create_fake_statuses_for_documents()
        self.assertEqual(Status.objects.count(), 3)
        for code in ["AE1", "AS1", "AS2"]:
            self.assertTrue(
                Status.objects.filter(dic=code, document__id=doc.id).exists()
            )

    @patch("solo_rog_api.management.commands.populate_fake.random.randint")
    def test_creates_fake_statuses_cor(self, randint_mock: Mock) -> None:
        randint_mock.return_value = 5
        doc = Document.objects.create(sdn="testsdn")
        populate_fake.create_fake_statuses_for_documents()
        self.assertEqual(Status.objects.count(), 5)
        for code in ["AE1", "AS1", "AS2", "D6T", "COR"]:
            self.assertTrue(
                Status.objects.filter(dic=code, document__id=doc.id).exists()
            )

    def test_creates_fake_addresses(self) -> None:
        populate_fake.create_fake_addresses(4)
        self.assertEqual(Address.objects.count(), 4)

    @override_settings(DEBUG=False)
    def test_management_command_does_not_work_outside_development(
        self, *args: Mock
    ) -> None:
        # No @patch decorators here, so *args is empty and the final loop is a
        # no-op safeguard; the command must refuse to run with DEBUG=False.
        stdout_mock = Mock()
        call_command("populate_fake", stdout=stdout_mock)
        self.assertTrue(stdout_mock.write.called)
        self.assertIn("Only meant for development", stdout_mock.write.call_args[0][0])
        for mock in args:
            self.assertFalse(mock.called)

    @patch(f"{import_root}.create_fake_users")
    @patch(f"{import_root}.create_fake_parts")
    @patch(f"{import_root}.create_fake_suppadds")
    @patch(f"{import_root}.create_fake_subinventorys")
    @patch(f"{import_root}.create_fake_locators")
    @patch(f"{import_root}.create_fake_documents")
    @patch(f"{import_root}.create_fake_statuses_for_documents")
    @patch(f"{import_root}.create_fake_addresses")
    @override_settings(DEBUG=True)
    def test_management_command_calls_all_fake_data_functions(
        self, *args: Mock
    ) -> None:
        stdout_mock = Mock()
        call_command("populate_fake", stdout=stdout_mock)
        for mock in args:
            self.assertTrue(mock.called)
|
""" all filters and the data types of their input """
# used with get_mentions() in queries/groups and with filters in rules
params = {
"author": str,
"xauthor": str,
"exactAuthor": str,
"xexactAuthor": str,
"authorGroup": list, # user passes in a string which gets converted to a list of ids
"xauthorGroup": list, # user passes in a string which gets converted to a list of ids
"blogCommentsMin": int,
"blogCommentsMax": int,
"category": list,
# user passes in a dictionary {parent:[child1, child2, etc]} which gets converted to a list of ids
"xcategory": list,
# user passes in a dictionary {parent:[child, child2, etc]} which gets converted to a list of ids
"parentCategory": list, # user passes in a string which gets converted to a list of ids
"xparentCategory": list, # user passes in a string which gets converted to a list of ids
"facebookAuthorId": int,
"xfacebookAuthorId": int,
"facebookRole": str,
"xfacebookRole": str,
"facebookSubtype": str,
"xfacebookSubtype": str,
"facebookCommentsMin": int,
"facebookCommentsMax": int,
"facebookLikesMin": int,
"facebookLikesMax": int,
"facebookSharesMin": int,
"facebookSharesMax": int,
"resourceType": str,
"xresourceType": str,
"forumPostsMin": int,
"forumPostsMax": int,
"forumViewsMin": int,
"forumViewsMax": int,
"impactMin": int,
"impactMax": int,
"language": str,
"xlanguage": str,
"locationGroup": list, # user passes in a string which gets converted to a list of ids
"xlocationGroup": list, # user passes in a string which gets converted to a list of ids
"location": str,
"xlocation": str,
"starred": bool,
"search": str,
"pageType": (str, list),
"xpageType": (str, list),
"sentiment": str,
"siteGroup": list, # user passes in a string which gets converted to a list of ids
"xsiteGroup": list, # user passes in a string which gets converted to a list of ids
"backlinksMin": int,
"backlinksMax": int,
"mozRankMin": int,
"mozRankMax": int,
"domain": str,
"xdomain": str,
"pagesPerVisitMin": int,
"pagesPerVisitMax": int,
"averageVisitsMin": int,
"averageVisitsMax": int,
"monthlyVisitorsMin": int,
"monthlyVisitorsMax": int,
"percentFemaleVisitorsMin": int,
"percentMaleVisitorsMin": int,
"averageDurationOfVisitMin": int,
"averageDurationOfVisitMax": int,
"tag": list, # user passes in a string which gets converted to a list of ids
"xtag": list, # user passes in a string which gets converted to a list of ids
"authorLocationGroup": list, # user passes in a string which gets converted to a list of ids
"xauthorLocationGroup": list, # user passes in a string which gets converted to a list of ids
"authorLocation": str,
"xauthorLocation": str,
"twitterFollowersMin": int,
"twitterFollowersMax": int,
"twitterFollowingMin": int,
"twitterFollowingMax": int,
"twitterReplyTo": str,
"xtwitterReplyTo": str,
"twitterRetweetOf": str,
"xtwitterRetweetOf": str,
"twitterPostCountMin": int,
"twitterPostCountMax": int,
"twitterRetweetsMin": int,
"twitterRetweetsMax": int,
"reachMin": int,
"reachMax": int,
"influenceMin": int,
"influenceMax": int,
"outreachMin": int,
"outreachMax": int,
"twitterVerified": bool,
"twitterRole": str,
"twitterAuthorId": int,
"xtwitterAuthorId": int,
"impressionsMin": int,
"impressionsMax": int,
"gender": str,
"accountType": (str, list), # one or more filters
"xaccountType": (str, list), # one or more filters
"profession": (str, list), # one or more filters
"xprofession": (str, list), # one or more filters
"interest": (str, list), # one or more filters
"xinterest": (str, list), # one or more filters
"geolocated": bool,
"latitudeMin": int,
"latitudeMax": int,
"longitudeMin": int,
"longitudeMax": int,
"status": str,
"xstatus": str,
"priority": str,
"xpriority": str,
"checked": bool,
"assigned": str,
"xassigned": str,
"threadId": int,
"xthreadId": int,
"threadEntryType": str,
"xthreadEntryType": str,
"threadAuthor": str,
"xthreadAuthor": str,
"postByAuthor": str,
"xpostByAuthor": str,
"shareOfAuthor": str,
"xshareOfAuthor": str,
"replyToAuthor": str,
"xreplyToAuthor": str,
"insightsEmoticon": str,
"xinsightsEmoticon": str,
"insightsHashtag": str,
"xinsightsHashtag": str,
"insightsMentioned": str,
"xinsightsMentioned": str,
"insightsUrl": str,
"xinsightsUrl": str,
"exclusiveLocation": str,
"hourOfDay": str,
"dayOfWeek": str,
"untilAssignmentUpdated": str,
"sinceAssignmentUpdated": str,
}
""" filters which are limited to a set of options """
special_options = {
"sentiment": ["positive", "negative", "neutral"],
"gender": ["male", "female"],
"status": ["open", "pending", "closed"],
"xstatus": ["open", "pending", "closed"],
"priority": ["high", "medium", "low"],
"xpriority": ["high", "medium", "low"],
"facebookRole": ["owner", "audience"],
"xfacebookRole": ["owner", "audience"],
"facebookSubtype": ["link", "other", "photo", "status", "video"],
"xfacebookSubtype": ["link", "other", "photo", "status", "video"],
"resourceType": ["public-facebook-post", "public-facebook-comment"],
"xresourceType": ["public-facebook-post", "public-facebook-comment"],
"pageType": [
"blog",
"forum",
"news",
"general",
"video",
"twitter",
"review",
"image",
"instagram",
"facebook",
],
"xpageType": [
"blog",
"forum",
"news",
"general",
"video",
"twitter",
"review",
"image",
"instagram",
"facebook",
],
"accountType": ["individual", "organizational"],
"xaccountType": ["individual", "organizational"],
"profession": [
"Executive",
"Student",
"Politician",
"Artist",
"Scientist & Researcher",
"Journalist",
"Software developer & IT",
"Legal",
"Health practitioner",
"Sportpersons & Trainer",
"Sales/Marketing/PR",
"Teacher & Lecturer",
],
"xprofession": [
"Executive",
"Student",
"Politician",
"Artist",
"Scientist & Researcher",
"Journalist",
"Software developer & IT",
"Legal",
"Health practitioner",
"Sportpersons & Trainer",
"Sales/Marketing/PR",
"Teacher & Lecturer",
],
"interest": [
"Animals & Pets",
"Fine arts",
"Automotive",
"Beauty/Health & Fitness",
"Books",
"Business",
"Environment",
"Family & Parenting",
"Fashion",
"Movies",
"Food & Drinks",
"Games",
"Music",
"Photo & Video",
"Politics",
"Science",
"Shopping",
"Sports",
"Technology",
"Travel",
"TV",
],
"xinterest": [
"Animals & Pets",
"Fine arts",
"Automotive",
"Beauty/Health & Fitness",
"Books",
"Business",
"Environment",
"Family & Parenting",
"Fashion",
"Movies",
"Food & Drinks",
"Games",
"Music",
"Photo & Video",
"Politics",
"Science",
"Shopping",
"Sports",
"Technology",
"Travel",
"TV",
],
}
""" mention attribultes which can be changed """
# used with patch_mentions() in queries/groups and with uploads in rules
mutable = {
"addTag": list,
"removeTag": list,
"addCategories": list,
"removeCategories": list,
"priority": str,
"removePriority": str,
"status": str,
"removeStatus": str,
"assignment": str,
"removeAssignment": str,
"sentiment": str,
"checked": bool,
"starred": bool,
"location": str,
}
# Allowed values for the mutable attributes that take a closed option set.
mutable_options = {
    "sentiment": ["positive", "negative", "neutral"],
    "status": ["open", "pending", "closed"],
    "removeStatus": ["open", "pending", "closed"],
    "priority": ["high", "medium", "low"],
    "removePriority": ["high", "medium", "low"],
}
|
from pytest import approx
from vyperdatum.points import *
from vyperdatum.vdatum_validation import vdatum_answers
# Module-level fixtures: a bare VyperCore exposes the installed vdatum
# version, which selects the matching expected-answer set for this machine.
# NOTE(review): `os` appears to come from the wildcard import of
# vyperdatum.points above -- confirm it is re-exported there.
gvc = VyperCore()
data_folder = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data')
vdatum_answer = vdatum_answers[gvc.vdatum.vdatum_version]
def test_points_setup():
    """Sanity-check that the VDatum installation was discovered and indexed."""
    # these tests assume you have the vdatum path setup in VyperCore
    # first time, you need to run it with the path to the vdatum folder, vp = VyperPoints('path\to\vdatum')
    vp = VyperPoints()
    assert os.path.exists(vp.vdatum.vdatum_path)
    assert vp.vdatum.grid_files
    assert vp.vdatum.polygon_files
    assert vp.vdatum.vdatum_version
    assert vp.vdatum.regions
def _transform_dataset(region: str):
    """Transform a region's NAD83 test points to MLLW and compare against the
    recorded VDatum answers (helper shared by the test_transform_* tests)."""
    vp = VyperPoints()
    x = vdatum_answer[region]['x']
    y = vdatum_answer[region]['y']
    z = vdatum_answer[region]['z_nad83']
    vp.transform_points((6319, 'ellipse'), 'mllw', x, y, z=z, include_vdatum_uncertainty=False)
    # Horizontal coordinates pass through unchanged; z is datum-shifted.
    assert vp.x == approx(x, abs=0.0001)
    assert vp.y == approx(y, abs=0.0001)
    assert vp.z == approx(vdatum_answer[region]['z_mllw'], abs=0.002)
def _transform_dataset_sampled(region: str):
    """Same as _transform_dataset but with the sampled-separation workflow."""
    vp = VyperPoints()
    x = vdatum_answer[region]['x']
    y = vdatum_answer[region]['y']
    z = vdatum_answer[region]['z_nad83']
    vp.transform_points((6319, 'ellipse'), 'mllw', x, y, z=z, include_vdatum_uncertainty=False, sample_distance=0.0005)
    # sampled points workflow does not return new xy coordinates, we can't just expand the sampled points to get new xy
    assert vp.x is None
    assert vp.y is None
    assert vp.z == approx(vdatum_answer[region]['z_mllw'], abs=0.002)
# One thin test per region so pytest reports each region individually.
def test_transform_north_carolina_dataset():
    _transform_dataset('north_carolina')
def test_transform_texas_dataset():
    _transform_dataset('texas')
def test_transform_california_dataset():
    _transform_dataset('california')
def test_transform_alaska_southeast_dataset():
    _transform_dataset('alaska_southeast')
def test_transform_north_carolina_dataset_sampled():
    _transform_dataset_sampled('north_carolina')
def test_transform_texas_dataset_sampled():
    _transform_dataset_sampled('texas')
def test_transform_california_dataset_sampled():
    _transform_dataset_sampled('california')
def test_transform_alaska_southeast_dataset_sampled():
    _transform_dataset_sampled('alaska_southeast')
def _transform_dataset_direction():
    """Check z-axis sign conventions for the MLLW output datum.

    NOTE(review): the leading underscore means pytest never collects this --
    presumably disabled on purpose; confirm before deleting.
    """
    vp = VyperPoints()
    x = vdatum_answer['north_carolina']['x']
    y = vdatum_answer['north_carolina']['y']
    z = vdatum_answer['north_carolina']['z_nad83']
    # assumes positive up input, and mllw means positive up output
    vp.transform_points((6319, 'ellipse'), 'mllw', x, y, z=z, include_vdatum_uncertainty=False)
    assert vp.z == approx(vdatum_answer['north_carolina']['z_mllw'], abs=0.002)
    # if we want positive down output, we need to use an epsg with positive down axis direction for mllw
    vp = VyperPoints()
    vp.transform_points((6319, 'ellipse'), 5866, x, y, z=z, include_vdatum_uncertainty=False)
    assert vp.z == approx(-vdatum_answer['north_carolina']['z_mllw'], abs=0.002)
|
from datetime import timedelta
def add_gigasecond(date):
    """Return `date` shifted forward by one gigasecond (10**9 seconds)."""
    return date + timedelta(seconds=10 ** 9)
|
import sys
sys.path.append('../src')
import data_io, params, SIF_embedding
# input
wordfile = '/home/diego/DATA/NLP/vectors.txt' # word vector file, can be downloaded from GloVe website
weightfile = '/home/diego/DATA/NLP/vocab.txt' # each line is a word and its frequency
weightpara = 1e-3 # the parameter in the SIF weighting scheme, usually in the range [3e-5, 3e-3]
rmpc = 1 # number of principal components to remove in SIF weighting scheme
sentences = ['this is an example sentence', 'this is another sentence that is slightly longer']
# load word vectors
(words, We) = data_io.getWordmap(wordfile)
# load word weights
word2weight = data_io.getWordWeight(weightfile, weightpara) # word2weight['str'] is the weight for the word 'str'
weight4ind = data_io.getWeight(words, word2weight) # weight4ind[i] is the weight for the i-th word
# load sentences
x, m, _ = data_io.sentences2idx(sentences, words) # x is the array of word indices, m is the binary mask indicating whether there is a word in that location
w = data_io.seq2weight(x, m, weight4ind) # get word weights
# set parameters
# NOTE(review): this rebinds the imported `params` module name to an instance
# of params.params(); it works because the module is never used again below,
# but the shadowing is fragile if more code is added.
params = params.params()
params.rmpc = rmpc
# get SIF embedding
embedding = SIF_embedding.SIF_embedding(We, x, w, params) # embedding[i,:] is the embedding for sentence i
|
"""
Dummy conftest.py for flask.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
- https://docs.pytest.org/en/stable/fixture.html
- https://docs.pytest.org/en/stable/writing_plugins.html
"""
# import pytest
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend goldbits received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a goldbitd or Goldbit-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting MBT values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    # 8 decimal places must survive the float round-trip exactly
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the goldbit data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Goldbit/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Goldbit")
    # everything else (Linux, BSD, ...) uses a dotfile in $HOME
    return os.path.expanduser("~/.goldbit")
def read_goldbit_config(dbdir):
    """Read the goldbit.conf file from dbdir, returns dictionary of settings"""
    # Python 2 module name (would be `configparser` on Python 3).
    from ConfigParser import SafeConfigParser
    class FakeSecHead(object):
        # goldbit.conf has no [section] headers, so this wrapper fakes a
        # single "[all]" header on the first readline() to satisfy ConfigParser.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # return the fake header exactly once
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # strip trailing '#' comments from each config line
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "goldbit.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a goldbit JSON-RPC server and return the ServiceProxy.

    Exits the process (status 1) on connection failure or when the server's
    testnet flag disagrees with the config.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        # default RPC ports: 18332 for testnet, 8332 for mainnet
        config['rpcport'] = 18332 if testnet else 8332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the goldbitd we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # Fix: this was a bare `except:`, which also caught the SystemExit
        # raised by sys.exit(1) above and misreported the testnet mismatch
        # as a generic connection error.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(goldbitd):
    """Ensure the wallet is unlocked, prompting for a passphrase if needed.

    Returns True when the wallet is unencrypted or unlocked past "now",
    False when the supplied passphrase did not unlock it (caller retries).
    """
    info = goldbitd.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # unlock briefly (5 seconds) -- just long enough to sign
            goldbitd.walletpassphrase(passphrase, 5)
        except Exception:
            # Fix: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt, making it impossible to Ctrl-C out of the
            # passphrase prompt.
            sys.stderr.write("Wrong passphrase\n")
    info = goldbitd.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(goldbitd):
    """Return {address: {"total", "outputs", "account"}} for all unspent coins."""
    address_summary = dict()
    address_to_account = dict()
    # map each receiving address to its account label
    for info in goldbitd.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = goldbitd.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = goldbitd.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-goldbit-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick inputs until `needed` is covered.

    Returns (outputs, change) where outputs is a list of {"txid", "vout"}
    dicts and change is gathered - needed (negative if funds fell short).
    """
    # Feel free to improve this, this is good enough for my simple needs:
    chosen = []
    gathered = Decimal("0.0")
    for coin in inputs:
        if gathered >= needed:
            break
        chosen.append({"txid": coin["txid"], "vout": coin["vout"]})
        gathered += coin["amount"]
    return (chosen, gathered - needed)
def create_tx(goldbitd, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending `amount`+`fee` from the
    given addresses to `toaddress`; returns the signed hex string.

    Exits the process when the addresses don't hold enough or signing fails.
    """
    all_coins = list_available(goldbitd)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    # gather every unspent output belonging to the source addresses
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f MBT available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to goldbitd.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        # change goes back to the last source address
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = goldbitd.createrawtransaction(inputs, outputs)
    signed_rawtx = goldbitd.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(goldbitd, txinfo):
    """Sum the values of a decoded transaction's inputs.

    Each input's value is looked up from its source transaction via
    getrawtransaction.
    """
    total = Decimal("0.0")
    for txin in txinfo['vin']:
        source_tx = goldbitd.getrawtransaction(txin['txid'], 1)
        total += source_tx['vout'][txin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of every output in a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(goldbitd, txdata_hex, max_fee):
    """Decode txdata_hex and exit(1) if its implied fee looks unreasonable.

    Fix: the two "no-fee" checks referenced an undefined name `fee` (it is a
    local variable of main()), so they raised NameError whenever reached.
    They now test the transaction's actual fee (total_in - total_out), which
    is what "no-fee" means here.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = goldbitd.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(goldbitd, txinfo)
        total_out = compute_amount_out(txinfo)
        actual_fee = total_in - total_out
        if actual_fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """CLI entry point.

    With no --amount: list each address's spendable balance.
    With --amount: build a raw transaction from --from to --to, sanity-check
    its fee (capped at 1% of the amount), and broadcast it unless --dry_run.
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get goldbits from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to send goldbits to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of goldbit.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_goldbit_config(options.datadir)
    if options.testnet: config['testnet'] = True
    goldbitd = connect_JSON(config)

    if options.amount is None:
        # No amount given: just report what each address could spend.
        address_summary = list_available(goldbitd)
        # .items() instead of the Python-2-only .iteritems() so the script
        # also runs under Python 3 (it behaves identically under Python 2).
        for address, info in address_summary.items():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(goldbitd) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(goldbitd, options.fromaddresses.split(","), options.to, amount, fee)
        # Reject fees over 1% of the amount being sent.
        sanity_test_fee(goldbitd, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = goldbitd.sendrawtransaction(txdata)
            print(txid)
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
|
from __future__ import absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import time as timer
import os
import KratosMultiphysics as Kratos
import KratosMultiphysics.ExternalSolversApplication
import KratosMultiphysics.FluidDynamicsApplication
import KratosMultiphysics.StructuralMechanicsApplication
import KratosMultiphysics.PoromechanicsApplication as KratosPoro
from KratosMultiphysics.analysis_stage import AnalysisStage
from importlib import import_module
class PoromechanicsAnalysis(AnalysisStage):
    '''Main script for poromechanics simulations.'''

    def __init__(self,model,parameters):
        """Configure threading/MPI, optional fracture propagation and initial
        stress utilities, then initialize the base AnalysisStage.

        Args:
            model: Kratos Model container.
            parameters: Kratos Parameters (typically ProjectParameters.json).
        """
        # Time monitoring
        KratosMultiphysics.Logger.PrintInfo(self._GetSimulationName(),timer.ctime())
        self.initial_time = timer.perf_counter()

        # Set number of OMP threads
        parallel=Kratos.OpenMPUtils()
        parallel.SetNumThreads(parameters["problem_data"]["number_of_threads"].GetInt())

        ## Import parallel modules if needed
        if (parameters["problem_data"]["parallel_type"].GetString() == "MPI"):
            # Importing these registers the MPI back-ends; the names are not
            # used directly here.
            import KratosMultiphysics.MetisApplication as MetisApplication
            import KratosMultiphysics.TrilinosApplication as TrilinosApplication
            KratosMultiphysics.Logger.PrintInfo(self._GetSimulationName(),"MPI parallel configuration. OMP_NUM_THREADS =",parallel.GetNumThreads())
        else:
            from KratosMultiphysics.PoromechanicsApplication import poromechanics_cleaning_utility
            poromechanics_cleaning_utility.CleanPreviousFiles(os.getcwd()) # Clean previous post files
            KratosMultiphysics.Logger.PrintInfo(self._GetSimulationName(),"OpenMP parallel configuration. OMP_NUM_THREADS =",parallel.GetNumThreads())

        # Initialize Fracture Propagation Utility if necessary
        if parameters["problem_data"]["fracture_utility"].GetBool():
            from KratosMultiphysics.PoromechanicsApplication.poromechanics_fracture_propagation_utility import FracturePropagationUtility
            self.fracture_utility = FracturePropagationUtility(model,
                                                               self._GetOrderOfProcessesInitialization())
            # The utility may rewrite the parameters (e.g. fracture meshes).
            parameters = self.fracture_utility.Initialize(parameters)

        # Creating solver and model part and adding variables
        super(PoromechanicsAnalysis,self).__init__(model,parameters)

        if parameters["problem_data"].Has("initial_stress_utility_settings"):
            from KratosMultiphysics.PoromechanicsApplication.poromechanics_initial_stress_utility import InitialStressUtility
            self.initial_stress_utility = InitialStressUtility(model,parameters)

    def Initialize(self):
        """Initialize the base stage and, if configured, load the initial stress field."""
        super(PoromechanicsAnalysis,self).Initialize()
        if self.project_parameters["problem_data"].Has("initial_stress_utility_settings"):
            self.initial_stress_utility.Load()

    def OutputSolutionStep(self):
        """Write step output, then let the fracture utility react to propagation."""
        super(PoromechanicsAnalysis,self).OutputSolutionStep()

        # Check Fracture Propagation Utility
        if self.project_parameters["problem_data"]["fracture_utility"].GetBool():
            if self.fracture_utility.IsPropagationStep():
                # Propagation replaces the solver and the process lists with
                # rebuilt ones for the updated geometry.
                self._solver,self._list_of_processes,self._list_of_output_processes = self.fracture_utility.CheckPropagation(self._solver,
                                                                                                                             self._list_of_processes,
                                                                                                                             self._list_of_output_processes)

    def Finalize(self):
        """Finalize utilities and the solver, and report elapsed wall time."""
        super(PoromechanicsAnalysis,self).Finalize()

        # Finalize Fracture Propagation Utility
        if self.project_parameters["problem_data"]["fracture_utility"].GetBool():
            self.fracture_utility.Finalize()

        if self.project_parameters["problem_data"].Has("initial_stress_utility_settings"):
            self.initial_stress_utility.Save()

        # Finalizing strategy
        # NOTE(review): self.parallel_type is presumably set by the base
        # AnalysisStage -- confirm when touching this.
        if self.parallel_type == "OpenMP":
            self._GetSolver().Clear()

        # Time control
        KratosMultiphysics.Logger.PrintInfo(self._GetSimulationName(),"Analysis Completed. Elapsed Time = %.3f" % (timer.perf_counter() - self.initial_time)," seconds.")
        KratosMultiphysics.Logger.PrintInfo(self._GetSimulationName(),timer.ctime())

    def _CreateSolver(self):
        """Import the solver module named by solver_settings and build the solver."""
        python_module_name = "KratosMultiphysics.PoromechanicsApplication"
        full_module_name = python_module_name + "." + self.project_parameters["solver_settings"]["solver_type"].GetString()
        solver_module = import_module(full_module_name)
        solver = solver_module.CreateSolver(self.model, self.project_parameters["solver_settings"])
        return solver

    def _GetOrderOfProcessesInitialization(self):
        # Order in which process lists are initialized; the fracture utility
        # is handed this same ordering in __init__.
        return ["constraints_process_list",
                "loads_process_list",
                "auxiliar_process_list"]

    def _GetSimulationName(self):
        # Label used in every log print of this stage.
        return "Poromechanics Analysis"
if __name__ == '__main__':
    from sys import argv

    # Accept at most one command-line argument: the parameter file name.
    if len(argv) > 2:
        err_msg = 'Too many input arguments!\n'
        err_msg += 'Use this script in the following way:\n'
        err_msg += '- With default parameter file (assumed to be called "ProjectParameters.json"):\n'
        err_msg += ' "python poromechanics_analysis.py"\n'
        err_msg += '- With custom parameter file:\n'
        err_msg += ' "python poromechanics_analysis.py <my-parameter-file>.json"\n'
        raise Exception(err_msg)

    if len(argv) == 2: # ProjectParameters is being passed from outside
        parameter_file_name = argv[1]
    else: # using default name
        parameter_file_name = "ProjectParameters.json"

    # Parse the JSON parameters and run the full analysis stage.
    with open(parameter_file_name,'r') as parameter_file:
        parameters = Kratos.Parameters(parameter_file.read())

    model = Kratos.Model()
    simulation = PoromechanicsAnalysis(model,parameters)
    simulation.Run()
|
import tensorflow as tf
import analytic_functions
import residuals
def get_tensors_from_batch(X, y_pred, y_real, num_inputs, num_outputs, num_conditions, num_feval):
    """Given the minibatches, retrieve the tensors containing the original point, the points+-deltas and boundary conditions.

    Assumes `X` is laid out as `num_feval` contiguous equal-sized sub-batches
    followed by `num_conditions` boundary/initial-condition rows -- TODO
    confirm against the input pipeline that produces these batches.

    Args:
        X: 2-D tensor of input points; row count is num_feval*batch + num_conditions.
        y_pred: predictions aligned row-for-row with X.
        y_real: ground-truth values aligned row-for-row with X.
        num_inputs: number of input features per point.
        num_outputs: number of output values per point.
        num_conditions: number of condition rows appended at the end of X.
        num_feval: number of function-evaluation sub-batches in X.

    Returns:
        (X_batches, y_pred_batches, y_real_batches, batch_size): lists with
        num_feval slices plus one final entry holding the condition rows,
        and the per-sub-batch row count as an int32 tensor.
    """
    X_batches = []
    y_pred_batches = []
    y_real_batches = []
    num_output = tf.constant(num_outputs)
    num_features = tf.constant(num_inputs)
    shape_x = tf.shape(X, name="shape_X") #15
    batch_size = tf.cast((shape_x[0]-num_conditions) / num_feval, tf.int32, name="batch_size") #5
    # Reset the tensor to 0,0 for every new batch
    # (TF1 graph mode: tf.assign returns the post-assignment value, so the
    # offsets below always start from [0, 0]).
    begin_x = tf.get_variable("begin_x", initializer=[0, 0], dtype=tf.int32)
    begin_y = tf.get_variable("begin_y", initializer=[0, 0], dtype=tf.int32)
    begin_x = tf.assign(begin_x, [0, 0])
    begin_y = tf.assign(begin_y, [0, 0])
    multiplier_begin = tf.constant([1, 0])
    size_x = tf.stack([batch_size, num_features])
    size_y = tf.stack([batch_size, num_output])
    size_x_initial = tf.stack([num_conditions, num_features], name="size_x_initial")
    size_y_initial = tf.stack([num_conditions, num_output], name="size_y_initial")
    # Offset advances one sub-batch of rows per iteration; column offset stays 0.
    offset_increment_x = tf.multiply(size_x, multiplier_begin)
    offset_increment_y = tf.multiply(size_y, multiplier_begin)
    #size_x = tf.Print(size_x, [begin_y, size_y_initial], message="begin_y, size_y_initial")
    # Retrieve points in the minibatch and predictions
    for i in range(num_feval):
        X_batch = tf.slice(X, begin_x, size_x)
        y_pred_batch = tf.slice(y_pred, begin_y, size_y)
        y_real_batch = tf.slice(y_real, begin_y, size_y)
        begin_x = tf.add(begin_x, offset_increment_x, name="begin_x_new")
        begin_y = tf.add(begin_y, offset_increment_y, name="begin_y_new")
        X_batches.append(X_batch)
        y_pred_batches.append(y_pred_batch)
        y_real_batches.append(y_real_batch)
    #Retrieve initial conditions. Initial conditions go at the end of the list
    X_initial = tf.slice(X, begin_x, size_x_initial, name="x_initial_batch")
    y_real_initial = tf.slice(y_real, begin_y, size_y_initial, name="y_real_initial_batch")
    y_pred_initial = tf.slice(y_pred, begin_y, size_y_initial, name="y_pred_initial_batch")
    X_batches.append(X_initial)
    y_pred_batches.append(y_pred_initial)
    y_real_batches.append(y_real_initial)
    return X_batches, y_pred_batches, y_real_batches, batch_size
def residual_function_wrapper(num_inputs, num_outputs, deltas, num_feval, num_conditions, alpha=1, **kwargs):
    """Build a loss closure returning (residual, rmse) for a flat minibatch.

    The returned function splits the flat batch into sub-batches plus
    condition rows, evaluates the phi-integral residual on them, and also
    computes the squared error of the first sub-batch. Both scalars are
    logged as TF summaries.
    """
    def residual_function(X, y_pred, y_real):
        batches_x, batches_pred, batches_real, batch_size = get_tensors_from_batch(
            X, y_pred, y_real, num_inputs, num_outputs, num_conditions, num_feval)
        residual = residuals.residual_phi_integral(
            batches_x, batches_pred, batches_real, deltas, batch_size,
            num_conditions, alpha, **kwargs)
        tf.summary.scalar('residual', residual)
        # Squared error on the first (original-points) sub-batch only.
        diff = tf.subtract(batches_real[0], batches_pred[0])
        rmse = tf.reduce_sum(tf.pow(diff, 2)) / (2 * tf.cast(batch_size, tf.float32))
        tf.summary.scalar('rmse', rmse)
        return residual, rmse
    return residual_function
|
"""authentication URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from .import views
# URL routes for the authentication app.
urlpatterns = [
    # Google OAuth login endpoint, handled by views.GoogleLogin.
    path('googleauth/', views.GoogleLogin.as_view(), name='google_login'),
]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
if value is None:
value = [1]
elif not isinstance(value, collections.Sized):
value = [value]
current_n = len(value)
if current_n == n + 2:
return value
elif current_n == 1:
value = list((value[0],) * n)
elif current_n == n:
value = list(value)
else:
raise ValueError("{} should be of length 1, {} or {} but was {}".format(
name, n, n + 2, current_n))
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
def _non_atrous_convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    data_format=None,  # pylint: disable=redefined-builtin
    strides=None,
    name=None):
  """Computes sums of N-D convolutions (actually cross correlation).

  It is required that 1 <= N <= 3.

  This is used to implement the more generic `convolution` function, which
  extends the interface of this function with a `dilation_rate` parameter.

  Args:
    input: Rank N+2 tensor of type T of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
      does not start with `"NC"`, or
      `[batch_size, in_channels] + input_spatial_shape` if `data_format` starts
      with `"NC"`.
    filter: Rank N+2 tensor of type T of shape
      `filter_spatial_shape + [in_channels, out_channels]`.  Rank of either
      `input` or `filter` must be known.
    padding: Padding method to use, must be either "VALID" or "SAME".
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    strides: Sequence of N positive integers, defaults to `[1] * N`.
    name: Name prefix to use.

  Returns:
    Rank N+2 tensor of type T of shape
    `[batch_size] + output_spatial_shape + [out_channels]`, where
    if padding == "SAME":
      output_spatial_shape = input_spatial_shape
    if padding == "VALID":
      output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.

  Raises:
    ValueError: if ranks are incompatible.

  """
  with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
    input_shape = input.get_shape()
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    filter_shape = filter.get_shape()
    # Rank/stride validation and dispatch to conv1d/conv2d/conv3d is done by
    # the helper class, built once from the static shapes.
    op = _NonAtrousConvolution(
        input_shape,
        filter_shape=filter_shape,
        padding=padding,
        data_format=data_format,
        strides=strides,
        name=scope)
    return op(input, filter)
class _NonAtrousConvolution(object):
  """Helper class for _non_atrous_convolution.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments:
    input_shape: static input shape, i.e. input.get_shape().
    filter_shape: static filter shape, i.e. filter.get_shape().
    padding: see _non_atrous_convolution.
    data_format: see _non_atrous_convolution.
    strides: see _non_atrous_convolution.
    name: see _non_atrous_convolution.
  """

  def __init__(
      self,
      input_shape,
      filter_shape,  # pylint: disable=redefined-builtin
      padding,
      data_format=None,
      strides=None,
      name=None):
    # Rank inference works in both directions: if only one of the two static
    # shapes has a known rank, with_rank propagates it to the other.
    filter_shape = filter_shape.with_rank(input_shape.ndims)
    self.padding = padding
    self.name = name
    input_shape = input_shape.with_rank(filter_shape.ndims)
    if input_shape.ndims is None:
      raise ValueError("Rank of convolution must be known")
    if input_shape.ndims < 3 or input_shape.ndims > 5:
      raise ValueError(
          "`input` and `filter` must have rank at least 3 and at most 5")
    conv_dims = input_shape.ndims - 2
    if strides is None:
      strides = [1] * conv_dims
    elif len(strides) != conv_dims:
      raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
                                                              conv_dims))
    # Pick the concrete conv op and massage `strides` into the form that op
    # expects (scalar for conv1d, full-rank list for conv2d/conv3d).
    if conv_dims == 1:
      # conv1d uses the 2-d data format names
      if data_format is None:
        data_format = "NWC"
      elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
        raise ValueError("data_format must be \"NWC\" or \"NCW\".")
      self.strides = strides[0]
      self.data_format = data_format
      self.conv_op = self._conv1d
    elif conv_dims == 2:
      if data_format is None or data_format == "NHWC":
        data_format = "NHWC"
        strides = [1] + list(strides) + [1]
      elif data_format == "NCHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
      self.strides = strides
      self.data_format = data_format
      self.conv_op = conv2d
    elif conv_dims == 3:
      if data_format is None or data_format == "NDHWC":
        strides = [1] + list(strides) + [1]
      elif data_format == "NCDHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
                         % data_format)
      self.strides = strides
      self.data_format = data_format
      self.conv_op = gen_nn_ops.conv3d

  # Note that we need this adapter since argument names for conv1d don't match
  # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
  # pylint: disable=redefined-builtin
  def _conv1d(self, input, filter, strides, padding, data_format, name):
    return conv1d(
        value=input,
        filters=filter,
        stride=strides,
        padding=padding,
        data_format=data_format,
        name=name)

  # pylint: enable=redefined-builtin

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # Dispatch to the op chosen in __init__ with the pre-normalized strides.
    return self.conv_op(
        input=inp,
        filter=filter,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        name=self.name)
@tf_export("nn.dilation2d", v1=[])
def dilation2d_v2(
    input,  # pylint: disable=redefined-builtin
    filters,  # pylint: disable=redefined-builtin
    strides,
    padding,
    data_format,
    dilations,
    name=None):
  """Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.

  The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
  input channel is processed independently of the others with its own
  structuring function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
  tensor depend on the `padding` algorithm. We currently only support the
  default "NHWC" `data_format`.

  In detail, the grayscale morphological 2-D dilation is the max-sum correlation
  (for consistency with `conv2d`, we use unmirrored filters):

      output[b, y, x, c] =
          max_{dy, dx} input[b,
                             strides[1] * y + rates[1] * dy,
                             strides[2] * x + rates[2] * dx,
                             c] +
                       filters[dy, dx, c]

  Max-pooling is a special case when the filter has size equal to the pooling
  kernel size and contains all zeros.

  Note on duality: The dilation of `input` by the `filters` is equal to the
  negation of the erosion of `-input` by the reflected `filters`.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
      `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the input
      tensor. Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`.
      The input stride for atrous morphological dilation. Must be:
      `[1, rate_height, rate_width, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Bug fix: the guard previously required "NCHW" and rejected the only
  # layout the op supports; per the docstring only "NHWC" is accepted.
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")

  return gen_nn_ops.dilation2d(input=input,
                               filter=filters,
                               strides=strides,
                               rates=dilations,
                               padding=padding,
                               name=name)
@tf_export(v1=["nn.dilation2d"])
def dilation2d_v1(  # pylint: disable=missing-docstring
    input,  # pylint: disable=redefined-builtin
    filter=None,  # pylint: disable=redefined-builtin
    strides=None,
    rates=None,
    padding=None,
    name=None,
    filters=None,
    dilations=None):
  # V1 endpoint: accepts both the legacy names (filter/rates) and the renamed
  # ones (filters/dilations); deprecated_argument_lookup resolves which of the
  # pair was supplied before delegating to the generated op.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  rates = deprecated_argument_lookup("dilations", dilations, "rates", rates)
  return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name)


# Reuse the generated op's docstring so the public V1 docs stay in sync
# (this is why the def above carries no docstring of its own).
dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
    input,  # pylint: disable=redefined-builtin
    dilation_rate,
    padding,
    op,
    filter_shape=None,
    spatial_dims=None,
    data_format=None):
  """Performs `op` on the space-to-batch representation of `input`.

  This has the effect of transforming sliding window operations into the
  corresponding "atrous" operation in which the input is sampled at the
  specified `dilation_rate`.

  In the special case that `dilation_rate` is uniformly 1, this simply returns:

    op(input, num_spatial_dims, padding)

  Otherwise, it returns:

    batch_to_space_nd(
      op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
         num_spatial_dims,
         "VALID")
      adjusted_dilation_rate,
      adjusted_crops),

  where:

    adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
    adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]

  defined as follows:

  We first define two int64 tensors `paddings` and `crops` of shape
  `[num_spatial_dims, 2]` based on the value of `padding` and the spatial
  dimensions of the `input`:

  If `padding = "VALID"`, then:

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate)

  If `padding = "SAME"`, then:

    dilated_filter_shape =
      filter_shape + (filter_shape - 1) * (dilation_rate - 1)

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate,
      [(dilated_filter_shape - 1) // 2,
       dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])

  Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
  dimensions are contiguous starting at the second dimension, but the specified
  `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
  `crops` in order to be usable with these operations.  For a given dimension,
  if the block size is 1, and both the starting and ending padding and crop
  amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
  which is what is needed for dimensions not part of `spatial_dims`.
  Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
  efficiently for any number of leading and trailing dimensions.

  For 0 <= i < len(spatial_dims), we assign:

    adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
    adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
    adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]

  All unassigned values of `adjusted_dilation_rate` default to 1, while all
  unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.

  Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
  padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
  `[1]*N`.

  Advanced usage. Note the following optimization: A sequence of
  `with_space_to_batch` operations with identical (not uniformly 1)
  `dilation_rate` parameters and "VALID" padding

    net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "VALID", op_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "VALID")
      ...
      result = op_k(result, num_spatial_dims, "VALID")

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
  `batch_to_space_nd`.

  Similarly, a sequence of `with_space_to_batch` operations with identical (not
  uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
  dimensions

    net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "SAME")
      ...
      result = op_k(result, num_spatial_dims, "SAME")

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  Args:
    input: Tensor of rank > max(spatial_dims).
    dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
    padding: str constant equal to "VALID" or "SAME"
    op: Function that maps (input, num_spatial_dims, padding) -> output
    filter_shape: If padding = "SAME", specifies the shape of the convolution
      kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
      If padding = "VALID", filter_shape is ignored and need not be specified.
    spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
      integers (which are >= 1) specifying the spatial dimensions of `input`
      and output.  Defaults to: `range(1, num_spatial_dims+1)`.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".

  Returns:
    The output Tensor as described above, dimensions will vary based on the op
    provided.

  Raises:
    ValueError: if `padding` is invalid or the arguments are incompatible.
    ValueError: if `spatial_dims` are invalid.

  """
  input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
  input_shape = input.get_shape()

  def build_op(num_spatial_dims, padding):
    # Adapter: _WithSpaceToBatch invokes its op as op(input, filter); the
    # user-supplied `op` here takes no filter, so the second argument is
    # ignored.
    return lambda inp, _: op(inp, num_spatial_dims, padding)

  new_op = _WithSpaceToBatch(
      input_shape,
      dilation_rate,
      padding,
      build_op,
      filter_shape=filter_shape,
      spatial_dims=spatial_dims,
      data_format=data_format)
  return new_op(input, None)
class _WithSpaceToBatch(object):
  """Helper class for with_space_to_batch.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments
    input_shape: static shape of input. i.e. input.get_shape().
    dilation_rate: see with_space_to_batch
    padding: see with_space_to_batch
    build_op: Function that maps (num_spatial_dims, paddings) -> (function that
      maps (input, filter) -> output).
    filter_shape: see with_space_to_batch
    spatial_dims: see with_space_to_batch
    data_format: see with_space_to_batch
  """

  def __init__(self,
               input_shape,
               dilation_rate,
               padding,
               build_op,
               filter_shape=None,
               spatial_dims=None,
               data_format=None):
    """Helper class for _with_space_to_batch."""
    dilation_rate = ops.convert_to_tensor(
        dilation_rate, dtypes.int32, name="dilation_rate")
    try:
      rate_shape = dilation_rate.get_shape().with_rank(1)
    except ValueError:
      raise ValueError("rate must be rank 1")

    if not dilation_rate.get_shape().is_fully_defined():
      raise ValueError("rate must have known shape")

    num_spatial_dims = rate_shape.dims[0].value

    # Channels-first layouts shift the first spatial dimension from 1 to 2.
    if data_format is not None and data_format.startswith("NC"):
      starting_spatial_dim = 2
    else:
      starting_spatial_dim = 1

    if spatial_dims is None:
      spatial_dims = range(starting_spatial_dim,
                           num_spatial_dims + starting_spatial_dim)
    orig_spatial_dims = list(spatial_dims)
    spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
    if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
      raise ValueError(
          "spatial_dims must be a montonically increasing sequence of positive "
          "integers")  # pylint: disable=line-too-long

    if data_format is not None and data_format.startswith("NC"):
      expected_input_rank = spatial_dims[-1]
    else:
      expected_input_rank = spatial_dims[-1] + 1

    try:
      input_shape.with_rank_at_least(expected_input_rank)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d at least" % (expected_input_rank))

    const_rate = tensor_util.constant_value(dilation_rate)
    rate_or_const_rate = dilation_rate
    if const_rate is not None:
      rate_or_const_rate = const_rate
      if np.any(const_rate < 1):
        raise ValueError("dilation_rate must be positive")
      if np.all(const_rate == 1):
        # Statically known rate of 1: no space-to-batch needed, call the op
        # directly with the requested padding.
        self.call = build_op(num_spatial_dims, padding)
        return

    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.

    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      if filter_shape is None:
        raise ValueError("filter_shape must be specified for SAME padding")
      filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
      const_filter_shape = tensor_util.constant_value(filter_shape)
      if const_filter_shape is not None:
        filter_shape = const_filter_shape
        self.base_paddings = _with_space_to_batch_base_paddings(
            const_filter_shape, num_spatial_dims, rate_or_const_rate)
      else:
        # Filter shape only known at graph run time: defer the computation
        # of base_paddings to __call__.
        self.num_spatial_dims = num_spatial_dims
        self.rate_or_const_rate = rate_or_const_rate
        self.base_paddings = None
    elif padding == "VALID":
      self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
    else:
      raise ValueError("Invalid padding method %r" % padding)

    self.input_shape = input_shape
    self.spatial_dims = spatial_dims
    self.dilation_rate = dilation_rate
    self.data_format = data_format
    self.op = build_op(num_spatial_dims, "VALID")
    self.call = self._with_space_to_batch_call

  def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
    """Call functionality for with_space_to_batch."""
    # Handle input whose shape is unknown during graph creation.
    input_spatial_shape = None
    input_shape = self.input_shape
    spatial_dims = self.spatial_dims
    if input_shape.ndims is not None:
      input_shape_list = input_shape.as_list()
      input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
    if input_spatial_shape is None or None in input_spatial_shape:
      # Fall back to the dynamic shape for any statically unknown dimension.
      input_shape_tensor = array_ops.shape(inp)
      input_spatial_shape = array_ops.stack(
          [input_shape_tensor[i] for i in spatial_dims])

    base_paddings = self.base_paddings
    if base_paddings is None:
      # base_paddings could not be computed at build time since static filter
      # shape was not fully defined.
      filter_shape = array_ops.shape(filter)
      base_paddings = _with_space_to_batch_base_paddings(
          filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
    paddings, crops = array_ops.required_space_to_batch_paddings(
        input_shape=input_spatial_shape,
        base_paddings=base_paddings,
        block_shape=self.dilation_rate)

    # Scatter rate/paddings/crops into full-rank form so non-spatial
    # dimensions pass through space_to_batch_nd untouched.
    dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                                spatial_dims)
    paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
    crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
    input_converted = array_ops.space_to_batch_nd(
        input=inp, block_shape=dilation_rate, paddings=paddings)

    result = self.op(input_converted, filter)

    result_converted = array_ops.batch_to_space_nd(
        input=result, block_shape=dilation_rate, crops=crops)

    # Recover channel information for output shape if channels are not last.
    if self.data_format is not None and self.data_format.startswith("NC"):
      if not result_converted.shape.dims[1].value and filter is not None:
        output_shape = result_converted.shape.as_list()
        output_shape[1] = filter.shape[-1]
        result_converted.set_shape(output_shape)

    return result_converted

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # self.call is either the direct op (rate uniformly 1) or the full
    # space-to-batch path.
    return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
                                       rate_or_const_rate):
  """Computes base paddings for the effective (dilated) filter.

  The effective filter inserts (rate - 1) zeros between consecutive filter
  values, so its spatial extent is
  `filter_spatial_shape + (filter_spatial_shape - 1) * (rate - 1)`.

  Args:
    filter_shape: 1-D integer tensor (or array) of filter dimensions; the
      first `num_spatial_dims` entries are the spatial extents.
    num_spatial_dims: Python int, number of spatial dimensions.
    rate_or_const_rate: scalar or 1-D dilation rate.

  Returns:
    A `[num_spatial_dims, 2]` tensor of [pad_start, pad_end] pairs.
  """
  spatial = filter_shape[:num_spatial_dims]
  # Extent of the upsampled filter with (rate - 1) zeros between taps.
  dilated_spatial = spatial + (spatial - 1) * (rate_or_const_rate - 1)
  total_pad = dilated_spatial - 1
  # When the total padding is odd, pad more at the end, following the same
  # convention as conv2d.
  pad_start = total_pad // 2
  pad_end = total_pad - pad_start
  return array_ops.stack(
      [[pad_start[d], pad_end[d]] for d in range(num_spatial_dims)])
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
  """Returns an `adjusted` version of `orig` based on `spatial_dims`.

  Tensor of the same type as `orig` and with shape
  `[max(spatial_dims), ...]` where:

    adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]

  for 0 <= i < len(spatial_dims), and

    adjusted[j, ...] = fill_value

  for j != spatial_dims[i] - 1 for some i.

  If `orig` is a constant value, then the result will be a constant value.

  Args:
    orig: Tensor of rank > max(spatial_dims).
    fill_value: Numpy scalar (of same data type as `orig`) specifying the fill
      value for non-spatial dimensions.
    spatial_dims: See with_space_to_batch.

  Returns:
    `adjusted` tensor.
  """
  fill_dims = orig.get_shape().as_list()[1:]
  dtype = orig.dtype.as_numpy_dtype
  parts = []
  # If `orig` has a statically-known value, work on the numpy value so that
  # the result is also a constant; otherwise fall back to graph ops.
  const_orig = tensor_util.constant_value(orig)
  const_or_orig = const_orig if const_orig is not None else orig
  prev_spatial_dim = 0
  i = 0
  while i < len(spatial_dims):
    start_i = i
    start_spatial_dim = spatial_dims[i]
    if start_spatial_dim > 1:
      # Fill in any gap from the previous spatial dimension (or dimension 1 if
      # this is the first spatial dimension) with `fill_value`.
      parts.append(
          np.full(
              [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
              fill_value,
              dtype=dtype))
    # Find the largest value of i such that:
    #   [spatial_dims[start_i], ..., spatial_dims[i]]
    #     == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
    # i.e. the end of a contiguous group of spatial dimensions.
    while (i + 1 < len(spatial_dims) and
           spatial_dims[i + 1] == spatial_dims[i] + 1):
      i += 1
    # Copy the whole contiguous run from `orig` in a single slice.
    parts.append(const_or_orig[start_i:i + 1])
    prev_spatial_dim = spatial_dims[i]
    i += 1
  if const_orig is not None:
    return np.concatenate(parts)
  else:
    return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
def convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    strides=None,
    dilation_rate=None,
    name=None,
    data_format=None,
    filters=None,
    dilations=None):
  # pylint: disable=line-too-long
  """Computes sums of N-D convolutions (actually cross-correlation).

  This also supports either output striding via the optional `strides` parameter
  or atrous convolution (also known as convolution with holes or dilated
  convolution, based on the French word "trous" meaning holes in English) via
  the optional `dilation_rate` parameter.  Currently, however, output striding
  is not supported for atrous convolutions.

  Specifically, in the case that `data_format` does not start with "NC", given
  a rank (N+2) `input` Tensor of shape

    [num_batches,
     input_spatial_shape[0],
     ...,
     input_spatial_shape[N-1],
     num_input_channels],

  a rank (N+2) `filter` Tensor of shape

    [spatial_filter_shape[0],
     ...,
     spatial_filter_shape[N-1],
     num_input_channels,
     num_output_channels],

  an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
  specifying the filter upsampling/input downsampling rate, and an optional list
  of N `strides` (defaulting [1]*N), this computes for each N-D spatial output
  position (x[0], ..., x[N-1]):

  ```
  output[b, x[0], ..., x[N-1], k] =
      sum_{z[0], ..., z[N-1], q}
          filter[z[0], ..., z[N-1], q, k] *
          padded_input[b,
                       x[0]*strides[0] + dilation_rate[0]*z[0],
                       ...,
                       x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
                       q]
  ```
  where b is the index into the batch, k is the output channel number, q is the
  input channel number, and z is the N-D spatial offset within the filter. Here,
  `padded_input` is obtained by zero padding the input using an effective
  spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
  output striding `strides` as described in the
  [comment here](https://tensorflow.org/api_guides/python/nn#Convolution).

  In the case that `data_format` does start with `"NC"`, the `input` and output
  (but not the `filter`) are simply transposed as follows:

    convolution(input, data_format, **kwargs) =
      tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
                               **kwargs),
                   [0, N+1] + range(1, N+1))

  It is required that 1 <= N <= 3.

  Args:
    input: An (N+2)-D `Tensor` of type `T`, of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, in_channels] + input_spatial_shape` if data_format starts
      with "NC".
    filter: An (N+2)-D `Tensor` with the same type as `input` and shape
      `spatial_filter_shape + [in_channels, out_channels]`.
    padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
    strides: Optional.  Sequence of N ints >= 1.  Specifies the output stride.
      Defaults to [1]*N.  If any value of strides is > 1, then all values of
      dilation_rate must be 1.
    dilation_rate: Optional.  Sequence of N ints >= 1.  Specifies the filter
      upsampling/input downsampling rate.  In the literature, the same parameter
      is sometimes called `input stride` or `dilation`.  The effective filter
      size used for the convolution will be `spatial_filter_shape +
      (spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
      (dilation_rate[i]-1) zeros between consecutive elements of the original
      filter in each spatial dimension i.  If any value of dilation_rate is > 1,
      then all values of strides must be 1.
    name: Optional name for the returned tensor.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    filters: Alias of filter.
    dilations: Alias of dilation_rate.

  Returns:
    A `Tensor` with the same type as `input` of shape

        `[batch_size] + output_spatial_shape + [out_channels]`

    if data_format is None or does not start with "NC", or

        `[batch_size, out_channels] + output_spatial_shape`

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of `padding`.

    If padding == "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding == "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] -
              (spatial_filter_shape[i]-1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: If input/output depth does not match `filter` shape, if padding
      is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
  """
  # Resolve the v2 argument aliases (`filters`, `dilations`) against the
  # deprecated v1 names, then delegate to the rank-agnostic implementation
  # shared with the v2 endpoint.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  dilation_rate = deprecated_argument_lookup(
      "dilations", dilations, "dilation_rate", dilation_rate)
  return convolution_internal(
      input,
      filter,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilations=dilation_rate,
      name=name)
@tf_export("nn.convolution", v1=[])
def convolution_v2(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  # Thin forwarding wrapper: the shared rank-agnostic implementation does all
  # of the work.  The docstring is attached below, derived from the v1 one.
  return convolution_internal(
      input=input,
      filters=filters,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilations=dilations,
      name=name)


# Reuse the v1 docstring, renaming the deprecated argument names
# (`dilation_rate` -> `dilations`, `filter` -> `filters`) to match the v2
# signature.
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        convolution.__doc__, "dilation_rate", "dilations"),
    "filter", "filters")
def convolution_internal(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  """Internal function which performs rank agnostic convolution.

  Dispatches to a rank-specific op (conv1d / conv2d / conv3d) when there is no
  dilation, and otherwise to the space-to-batch based `Convolution` helper.
  """
  with ops.name_scope(name, "convolution", [input, filters]) as name:
    # Infer the number of spatial dimensions N from whichever of input/filters
    # has a known rank.  `.shape` may be a TensorShape or a plain sequence.
    if isinstance(input.shape, tensor_shape.TensorShape) and \
        input.shape.rank is not None:
      n = len(input.shape) - 2
    elif not isinstance(input.shape, tensor_shape.TensorShape) and \
        input.shape is not None:
      n = len(input.shape) - 2
    elif isinstance(filters.shape, tensor_shape.TensorShape) and \
        filters.shape.rank is not None:
      n = len(filters.shape) - 2
    elif not isinstance(filters.shape, tensor_shape.TensorShape) and \
        filters.shape is not None:
      n = len(filters.shape) - 2
    else:
      raise ValueError("rank of input or filter must be known")

    if n < 1 or n > 3:
      raise ValueError(
          "Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))

    # Channels sit at dim 1 for "NC*" layouts, otherwise last.
    if data_format is None:
      channel_index = n + 1
    else:
      channel_index = 1 if data_format.startswith("NC") else n + 1

    strides = _get_sequence(strides, n, channel_index, "strides")
    dilations = _get_sequence(dilations, n, channel_index, "dilations")

    conv_ops = {1: conv1d, 2: gen_nn_ops.conv2d, 3: gen_nn_ops.conv3d}

    if all(i == 1 for i in dilations):
      # fast path if no dilation as gradient only supported on GPU for dilations
      op = conv_ops.get(n)
      return op(
          input,
          filters,
          strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations,
          name=name)
    else:
      # `Convolution` expects per-spatial-dim strides/dilations, so strip the
      # batch and channel entries according to the layout.
      if channel_index == 1:
        strides = strides[2:]
        dilations = dilations[2:]
      else:
        strides = strides[1:-1]
        dilations = dilations[1:-1]

      op = Convolution(
          tensor_shape.as_shape(input.shape),
          tensor_shape.as_shape(filters.shape),
          padding,
          strides=strides,
          dilation_rate=dilations,
          name=name,
          data_format=data_format)
      return op(input, filters)
class Convolution(object):
  """Helper class for convolution.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments
    input_shape: static shape of input. i.e. input.get_shape().
    filter_shape: static shape of the filter. i.e. filter.get_shape().
    padding: see convolution.
    strides: see convolution.
    dilation_rate: see convolution.
    name: see convolution.
    data_format: see convolution.
  """

  def __init__(self,
               input_shape,
               filter_shape,
               padding,
               strides=None,
               dilation_rate=None,
               name=None,
               data_format=None):
    """Helper function for convolution."""
    # Determine the total rank from the filter shape if known, otherwise
    # fall back to the input shape.
    num_total_dims = filter_shape.ndims
    if num_total_dims is None:
      num_total_dims = input_shape.ndims
    if num_total_dims is None:
      raise ValueError("rank of input or filter must be known")

    num_spatial_dims = num_total_dims - 2

    try:
      input_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d" % (num_spatial_dims + 2))

    try:
      filter_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "filter tensor must have rank %d" % (num_spatial_dims + 2))

    # Channel dimension is last for "N...C" layouts, second for "NC...".
    if data_format is None or not data_format.startswith("NC"):
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_spatial_dims + 1)
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
      spatial_dims = range(2, num_spatial_dims + 2)

    if not input_channels_dim.is_compatible_with(
        filter_shape[num_spatial_dims]):
      raise ValueError(
          "number of input channels does not match corresponding dimension of "
          "filter, {} != {}".format(input_channels_dim,
                                    filter_shape[num_spatial_dims]))

    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.data_format = data_format
    self.strides = strides
    self.name = name
    # Dilation (if any) is implemented by wrapping a non-atrous convolution in
    # space-to-batch / batch-to-space transforms.
    self.conv_op = _WithSpaceToBatch(
        input_shape,
        dilation_rate=dilation_rate,
        padding=padding,
        build_op=self._build_op,
        filter_shape=filter_shape,
        spatial_dims=spatial_dims,
        data_format=data_format)

  def _build_op(self, _, padding):
    # Callback for _WithSpaceToBatch: build the underlying convolution op with
    # the (possibly adjusted) padding.  The first argument (filter shape) is
    # unused because the filter shape was captured at construction time.
    return _NonAtrousConvolution(
        self.input_shape,
        filter_shape=self.filter_shape,
        padding=padding,
        data_format=self.data_format,
        strides=self.strides,
        name=self.name)

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
def pool(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    padding,
    dilation_rate=None,
    strides=None,
    name=None,
    data_format=None,
    dilations=None):
  # pylint: disable=line-too-long
  """Performs an N-D pooling operation.

  In the case that `data_format` does not start with "NC", computes for
      0 <= b < batch_size,
      0 <= x[i] < output_spatial_shape[i],
      0 <= c < num_channels:

  ```
    output[b, x[0], ..., x[N-1], c] =
      REDUCE_{z[0], ..., z[N-1]}
        input[b,
              x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
              ...
              x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
              c],
  ```

  where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution` for details.
  The reduction never includes out-of-bounds positions.

  In the case that `data_format` starts with `"NC"`, the `input` and output are
  simply transposed as follows:

  ```
    pool(input, data_format, **kwargs) =
      tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
                        **kwargs),
                   [0, N+1] + range(1, N+1))
  ```

  Args:
    input: Tensor of rank N+2, of shape
      `[batch_size] + input_spatial_shape + [num_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC".  Pooling happens over the spatial dimensions only.
    window_shape: Sequence of N ints >= 1.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    padding: The padding algorithm, must be "SAME" or "VALID".
      See the "returns" section of `tf.nn.convolution` for details.
    dilation_rate: Optional.  Dilation rate.  List of N ints >= 1.
      Defaults to [1]*N.  If any value of dilation_rate is > 1, then all values
      of strides must be 1.
    strides: Optional.  Sequence of N ints >= 1.  Defaults to [1]*N.
      If any value of strides is > 1, then all values of dilation_rate must be
      1.
    name: Optional. Name of the op.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: Alias for dilation_rate

  Returns:
    Tensor of rank N+2, of shape
      [batch_size] + output_spatial_shape + [num_channels]

    if data_format is None or does not start with "NC", or

      [batch_size, num_channels] + output_spatial_shape

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of padding:

    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: if arguments are invalid.
  """
  dilation_rate = deprecated_argument_lookup(
      "dilations", dilations, "dilation_rate", dilation_rate)
  # pylint: enable=line-too-long
  with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
                      [input]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin

    num_spatial_dims = len(window_shape)
    if num_spatial_dims < 1 or num_spatial_dims > 3:
      raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")

    input.get_shape().with_rank(num_spatial_dims + 2)

    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    if padding == "SAME" and np.any(dilation_rate > 1):
      raise ValueError(
          "pooling with SAME padding is not implemented for dilation_rate > 1")

    if np.any(strides > window_shape):
      raise ValueError(
          "strides > window_shape not supported due to inconsistency between "
          "CPU and GPU implementations")

    pooling_ops = {
        ("MAX", 1): max_pool,
        ("MAX", 2): max_pool,
        ("MAX", 3): max_pool3d,  # pylint: disable=undefined-variable
        ("AVG", 1): avg_pool,
        ("AVG", 2): avg_pool,
        ("AVG", 3): avg_pool3d,  # pylint: disable=undefined-variable
    }
    op_key = (pooling_type, num_spatial_dims)
    if op_key not in pooling_ops:
      raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
                                                              op_key[0]))

    # Expand window/strides to the full input rank and locate the spatial
    # dimensions, depending on whether channels are last or second.
    if data_format is None or not data_format.startswith("NC"):
      adjusted_window_shape = [1] + list(window_shape) + [1]
      adjusted_strides = [1] + list(strides) + [1]
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      adjusted_window_shape = [1, 1] + list(window_shape)
      adjusted_strides = [1, 1] + list(strides)
      spatial_dims = range(2, num_spatial_dims + 2)

    if num_spatial_dims == 1:
      # 1-D pooling is implemented by inserting a dummy spatial dimension and
      # delegating to the 2-D pooling ops.
      if data_format is None or data_format == "NWC":
        data_format_kwargs = dict(data_format="NHWC")
      elif data_format == "NCW":
        data_format_kwargs = dict(data_format="NCHW")
      else:
        raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
      adjusted_window_shape = [1] + adjusted_window_shape
      adjusted_strides = [1] + adjusted_strides
    else:
      data_format_kwargs = dict(data_format=data_format)

    def op(converted_input, _, converted_padding):  # pylint: disable=missing-docstring
      if num_spatial_dims == 1:
        converted_input = array_ops.expand_dims(converted_input,
                                                spatial_dims[0])
      result = pooling_ops[op_key](
          converted_input,
          adjusted_window_shape,
          adjusted_strides,
          converted_padding,
          name=scope,
          **data_format_kwargs)
      if num_spatial_dims == 1:
        result = array_ops.squeeze(result, [spatial_dims[0]])
      return result

    # Dilation is handled by wrapping the pooling op in space-to-batch /
    # batch-to-space transforms.
    return with_space_to_batch(
        input=input,
        dilation_rate=dilation_rate,
        padding=padding,
        op=op,
        spatial_dims=spatial_dims,
        filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
def pool_v2(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  # pylint: disable=line-too-long
  """Performs an N-D pooling operation.

  In the case that `data_format` does not start with "NC", computes for
      0 <= b < batch_size,
      0 <= x[i] < output_spatial_shape[i],
      0 <= c < num_channels:

  ```
    output[b, x[0], ..., x[N-1], c] =
      REDUCE_{z[0], ..., z[N-1]}
        input[b,
              x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
              ...
              x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
              c],
  ```

  where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution` for details.
  The reduction never includes out-of-bounds positions.

  In the case that `data_format` starts with `"NC"`, the `input` and output are
  simply transposed as follows:

  ```
    pool(input, data_format, **kwargs) =
      tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
                        **kwargs),
                   [0, N+1] + range(1, N+1))
  ```

  Args:
    input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
      [num_channels]` if data_format does not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC".  Pooling happens over the spatial dimensions only.
    window_shape: Sequence of N ints >= 1.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    strides: Optional. Sequence of N ints >= 1.  Defaults to [1]*N. If any value of
      strides is > 1, then all values of dilation_rate must be 1.
    padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME".
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW". For
      N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: Optional.  Dilation rate.  List of N ints >= 1. Defaults to
      [1]*N.  If any value of dilation_rate is > 1, then all values of strides
      must be 1.
    name: Optional. Name of the op.

  Returns:
    Tensor of rank N+2, of shape
      [batch_size] + output_spatial_shape + [num_channels]

    if data_format is None or does not start with "NC", or

      [batch_size, num_channels] + output_spatial_shape

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of padding:

    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: if arguments are invalid.
  """
  # Delegate to the v1 implementation, translating the v2 `dilations` name to
  # its deprecated `dilation_rate` counterpart.
  v1_kwargs = dict(
      input=input,
      window_shape=window_shape,
      pooling_type=pooling_type,
      padding=padding,
      dilation_rate=dilations,
      strides=strides,
      name=name,
      data_format=data_format)
  return pool(**v1_kwargs)
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
  """Atrous convolution (a.k.a. convolution with holes or dilated convolution).

  This function is a simpler wrapper around the more general
  `tf.nn.convolution`, and exists only for backwards compatibility. You can
  use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.

  Computes a 2-D atrous convolution, also known as convolution with holes or
  dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
  parameter is equal to one, it performs regular 2-D convolution. If the `rate`
  parameter is greater than one, it performs convolution with holes, sampling
  the input values every `rate` pixels in the `height` and `width` dimensions.
  This is equivalent to convolving the input with a set of upsampled filters,
  produced by inserting `rate - 1` zeros between two consecutive values of the
  filters along the `height` and `width` dimensions, hence the name atrous
  convolution or convolution with holes (the French word trous means holes in
  English).

  More specifically:

  ```
  output[batch, height, width, out_channel] =
      sum_{dheight, dwidth, in_channel} (
          filters[dheight, dwidth, in_channel, out_channel] *
          value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
      )
  ```

  Atrous convolution allows us to explicitly control how densely to compute
  feature responses in fully convolutional networks. Used in conjunction with
  bilinear interpolation, it offers an alternative to `conv2d_transpose` in
  dense prediction tasks such as semantic image segmentation, optical flow
  computation, or depth estimation. It also allows us to effectively enlarge
  the field of view of filters without increasing the number of parameters or
  the amount of computation.

  For a description of atrous convolution and how it can be used for dense
  feature extraction, please see: [Semantic Image Segmentation with Deep
  Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062).
  The same operation is investigated further in [Multi-Scale Context Aggregation
  by Dilated Convolutions](http://arxiv.org/abs/1511.07122). Previous works
  that effectively use atrous convolution in different ways are, among others,
  [OverFeat: Integrated Recognition, Localization and Detection using
  Convolutional Networks](http://arxiv.org/abs/1312.6229) and [Fast Image
  Scanning with Deep Max-Pooling Convolutional Neural
  Networks](http://arxiv.org/abs/1302.1700).
  Atrous convolution is also closely related to the so-called noble identities
  in multi-rate signal processing.

  There are many different ways to implement atrous convolution (see the refs
  above). The implementation here reduces

  ```python
      atrous_conv2d(value, filters, rate, padding=padding)
  ```

  to the following three operations:

  ```python
      paddings = ...
      net = space_to_batch(value, paddings, block_size=rate)
      net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
      crops = ...
      net = batch_to_space(net, crops, block_size=rate)
  ```

  Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
  operations with identical `rate` parameters, 'SAME' `padding`, and filters
  with odd heights/ widths:

  ```python
      net = atrous_conv2d(net, filters1, rate, padding="SAME")
      net = atrous_conv2d(net, filters2, rate, padding="SAME")
      ...
      net = atrous_conv2d(net, filtersK, rate, padding="SAME")
  ```

  can be equivalently performed cheaper in terms of computation and memory as:

  ```python
      pad = ...  # padding so that the input dims are multiples of rate
      net = space_to_batch(net, paddings=pad, block_size=rate)
      net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
      net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
      ...
      net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
      net = batch_to_space(net, crops=pad, block_size=rate)
  ```

  because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
  the same `block_size` cancel out when their respective `paddings` and `crops`
  inputs are identical.

  Args:
    value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
      format. Its shape is `[batch, in_height, in_width, in_channels]`.
    filters: A 4-D `Tensor` with the same type as `value` and shape
      `[filter_height, filter_width, in_channels, out_channels]`. `filters`'
      `in_channels` dimension must match that of `value`. Atrous convolution is
      equivalent to standard convolution with upsampled filters with effective
      height `filter_height + (filter_height - 1) * (rate - 1)` and effective
      width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
      inserting `rate - 1` zeros along consecutive elements across the
      `filters`' spatial dimensions.
    rate: A positive int32. The stride with which we sample input values across
      the `height` and `width` dimensions. Equivalently, the rate by which we
      upsample the filter values by inserting zeros across the `height` and
      `width` dimensions. In the literature, the same parameter is sometimes
      called `input stride` or `dilation`.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.
    Output shape with `'VALID'` padding is:

        [batch, height - 2 * (filter_width - 1),
         width - 2 * (filter_height - 1), out_channels].

    Output shape with `'SAME'` padding is:

        [batch, height, width, out_channels].

  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  # Atrous 2-D convolution is just a plain convolution with the same dilation
  # rate applied to both spatial dimensions.
  spatial_rate = np.broadcast_to(rate, (2,))
  return convolution(
      input=value,
      filter=filters,
      padding=padding,
      dilation_rate=spatial_rate,
      name=name)
def _convert_padding(padding):
"""Converts Python padding to C++ padding for ops which take EXPLICIT padding.
Args:
padding: the `padding` argument for a Python op which supports EXPLICIT
padding.
Returns:
(padding, explicit_paddings) pair, which should be passed as attributes to a
C++ op.
Raises:
ValueError: If padding is invalid.
"""
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != 4:
raise ValueError("When padding is a list, it must be of size 4. Got "
"padding of size: %d" % len(padding))
padding = "EXPLICIT"
return padding, explicit_paddings
@tf_export("nn.conv2d", v1=[])
def conv2d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              strides,
              padding,
              data_format="NHWC",
              dilations=None,
              name=None):
  # pylint: disable=line-too-long
  r"""Computes a 2-D convolution given 4-D `input` and `filters` tensors.

  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, out_channels]`, this op
  performs the following:

  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.

  In detail, with the default NHWC format,

      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
                          filter[di, dj, q, k]

  Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      A 4-D tensor. The dimension order is interpreted according to the value
      of `data_format`, see below for details.
    filters: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: An int or list of `ints` that has length `1`, `2` or `4`.  The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
      by the value of `data_format`, see below for details.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of`input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details. Dilations in the batch and depth dimensions if a 4-d tensor
      must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # pylint: enable=line-too-long
  # Forward to the v1 implementation; cuDNN is always enabled for the v2 API.
  return conv2d(input,  # pylint: disable=redefined-builtin
                filters,
                strides,
                padding,
                use_cudnn_on_gpu=True,
                data_format=data_format,
                dilations=dilations,
                name=name)
@tf_export(v1=["nn.conv2d"])
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter=None,
    strides=None,
    padding=None,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None,
    filters=None):
  r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.

  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, out_channels]`, this op
  performs the following:

  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.

  In detail, with the default NHWC format,

      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
                          * filter[di, dj, q, k]

  Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      A 4-D tensor. The dimension order is interpreted according to the value
      of `data_format`, see below for details.
    filter: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
      by the value of `data_format`, see below for details.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is
      used and data_format is `"NCHW"`, this should be in the form `[[0, 0],
      [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details. Dilations in the batch and depth dimensions if a 4-d tensor
      must be 1.
    name: A name for the operation (optional).
    filters: Alias for filter.

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Resolve the deprecated "filter" name against its "filters" alias; using
  # both at once raises inside the lookup helper.
  filter = deprecation.deprecated_argument_lookup(
      "filters", filters, "filter", filter)
  # Split string vs. explicit padding into the op's two attributes.
  padding, explicit_paddings = _convert_padding(padding)
  if data_format is None:
    data_format = "NHWC"
  # The channel axis position decides where the implicit 1s go when
  # expanding scalar/2-element strides and dilations to length 4.
  channel_index = 1 if data_format.startswith("NC") else 3
  strides = _get_sequence(strides, 2, channel_index, "strides")
  dilations = _get_sequence(dilations, 2, channel_index, "dilations")
  return gen_nn_ops.conv2d(input, # pylint: disable=redefined-builtin
                           filter,
                           strides,
                           padding,
                           use_cudnn_on_gpu=use_cudnn_on_gpu,
                           explicit_paddings=explicit_paddings,
                           data_format=data_format,
                           dilations=dilations,
                           name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter_sizes,
    out_backprop,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes the gradients of convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    filter_sizes: A `Tensor` of type `int32` giving the shape of the 4-D
      `[filter_height, filter_width, in_channels, out_channels]` filter.
    out_backprop: A `Tensor` with the same type as `input`, 4-D with shape
      `[batch, out_height, out_width, out_channels]`; the gradients w.r.t.
      the output of the convolution.
    strides: A list of `ints`. The stride of the sliding window for each
      dimension of the input, ordered according to `data_format`.
    padding: Either the `string` `"SAME"` or `"VALID"`, or a list of explicit
      paddings for the start and end of each dimension (same layout rules as
      `tf.nn.conv2d`).
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string`, either `"NHWC"` (default, i.e.
      `[batch, in_height, in_width, in_channels]`) or `"NCHW"` (i.e.
      `[batch, in_channels, in_height, in_width]`).
    dilations: An optional list of `ints`, defaults to `[1, 1, 1, 1]`. The
      per-dimension dilation factors; a factor k > 1 leaves k-1 skipped cells
      between filter elements. Batch and depth dilations must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Normalize the padding spec into the op's string + explicit-paddings pair.
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_filter(
      input,
      filter_sizes,
      out_backprop,
      strides,
      padding,
      use_cudnn_on_gpu=use_cudnn_on_gpu,
      explicit_paddings=explicit_paddings,
      data_format=data_format,
      dilations=dilations,
      name=name)
@tf_export(v1=["nn.conv2d_backprop_input"])
def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
    input_sizes,
    filter=None,
    out_backprop=None,
    strides=None,
    padding=None,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None,
    filters=None):
  r"""Computes the gradients of convolution with respect to the input.

  Args:
    input_sizes: A `Tensor` of type `int32` giving the shape of the 4-D
      `[batch, height, width, channels]` input tensor.
    filter: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`. 4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    out_backprop: A `Tensor` with the same type as `filter`, 4-D with shape
      `[batch, out_height, out_width, out_channels]`; the gradients w.r.t.
      the output of the convolution.
    strides: A list of `ints`. The stride of the sliding window for each
      dimension of the input, ordered according to `data_format`.
    padding: Either the `string` `"SAME"` or `"VALID"`, or a list of explicit
      paddings for the start and end of each dimension (same layout rules as
      `tf.nn.conv2d`).
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string`, either `"NHWC"` (default, i.e.
      `[batch, in_height, in_width, in_channels]`) or `"NCHW"` (i.e.
      `[batch, in_channels, in_height, in_width]`).
    dilations: An optional list of `ints`, defaults to `[1, 1, 1, 1]`. The
      per-dimension dilation factors; a factor k > 1 leaves k-1 skipped cells
      between filter elements. Batch and depth dilations must be 1.
    name: A name for the operation (optional).
    filters: Alias for filter.

  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  # "filters" is the preferred spelling; fall back to the deprecated "filter".
  filter = deprecation.deprecated_argument_lookup(
      "filters", filters, "filter", filter)
  # Normalize the padding spec into the op's string + explicit-paddings pair.
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_input(
      input_sizes,
      filter,
      out_backprop,
      strides,
      padding,
      use_cudnn_on_gpu=use_cudnn_on_gpu,
      explicit_paddings=explicit_paddings,
      data_format=data_format,
      dilations=dilations,
      name=name)
@tf_export(v1=["nn.conv2d_transpose"])
def conv2d_transpose(
    value=None,
    filter=None,  # pylint: disable=redefined-builtin
    output_shape=None,
    strides=None,
    padding="SAME",
    data_format="NHWC",
    name=None,
    input=None,  # pylint: disable=redefined-builtin
    filters=None,
    dilations=None):
  """The transpose of `conv2d`.

  Often called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), this is
  really the transpose (gradient) of `conv2d`, not a true deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float`, laid out as
      `[batch, height, width, in_channels]` for `NHWC` or
      `[batch, in_channels, height, width]` for `NCHW`.
    filter: A 4-D `Tensor` with the same type as `value` and shape
      `[height, width, output_channels, in_channels]`; its `in_channels`
      dimension must match that of `value`.
    output_shape: A 1-D `Tensor` giving the output shape of the
      deconvolution op.
    strides: An int or list of `ints` of length `1`, `2` or `4`. The sliding
      window stride per dimension of `input`; a single value is replicated
      over `H` and `W`, and the `N` and `C` strides default to 1. Ordering
      follows `data_format`.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the returned tensor.
    input: Alias for value.
    filters: Alias for filter.
    dilations: An int or list of `ints` of length `1`, `2` or `4`, defaulting
      to 1. Per-dimension dilation factor for `input`; a single value is
      replicated over `H` and `W`, and `N`/`C` default to 1. A factor k > 1
      leaves k-1 skipped cells between filter elements. Batch and depth
      dilations must be 1.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  # Resolve the deprecated aliases before delegating to the V2 endpoint.
  value = deprecated_argument_lookup("input", input, "value", value)
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  with ops.name_scope(name, "conv2d_transpose",
                      [value, filter, output_shape]) as name:
    return conv2d_transpose_v2(
        value,
        filter,
        output_shape,
        strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv2d_transpose", v1=[])
def conv2d_transpose_v2(
    input,
    filters,  # pylint: disable=redefined-builtin
    output_shape,
    strides,
    padding="SAME",
    data_format="NHWC",
    dilations=None,
    name=None):
  """The transpose of `conv2d`.

  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `conv2d` rather than an actual
  deconvolution.

  Args:
    input: A 4-D `Tensor` of type `float` and shape `[batch, height, width,
      in_channels]` for `NHWC` data format or `[batch, in_channels, height,
      width]` for `NCHW` data format.
    filters: A 4-D `Tensor` with the same type as `input` and shape `[height,
      width, output_channels, in_channels]`. `filters`' `in_channels` dimension
      must match that of `input`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
      by the value of `data_format`, see below for details.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details. Dilations in the batch and depth dimensions if a 4-d tensor
      must be 1.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `input`.

  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  # BUGFIX: the name-scope values list previously referenced the builtin
  # `filter` (this function's parameter is `filters`), passing a function
  # object instead of the filter tensor.
  with ops.name_scope(name, "conv2d_transpose",
                      [input, filters, output_shape]) as name:
    if data_format is None:
      data_format = "NHWC"
    # Channel axis position decides where the implicit 1s go when expanding
    # scalar/2-element strides and dilations to length 4.
    channel_index = 1 if data_format.startswith("NC") else 3
    strides = _get_sequence(strides, 2, channel_index, "strides")
    dilations = _get_sequence(dilations, 2, channel_index, "dilations")
    # conv2d_transpose is the input-gradient of a forward conv2d whose input
    # has shape `output_shape`.
    return gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape,
        filter=filters,
        out_backprop=input,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,
                            filters,
                            output_shape,
                            rate,
                            padding,
                            name=None):
  """The transpose of `atrous_conv2d`.

  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `atrous_conv2d` rather than an actual
  deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
      format. Its shape is `[batch, in_height, in_width, in_channels]`.
    filters: A 4-D `Tensor` with the same type as `value` and shape
      `[filter_height, filter_width, out_channels, in_channels]`. `filters`'
      `in_channels` dimension must match that of `value`. Atrous convolution is
      equivalent to standard convolution with upsampled filters with effective
      height `filter_height + (filter_height - 1) * (rate - 1)` and effective
      width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
      inserting `rate - 1` zeros along consecutive elements across the
      `filters`' spatial dimensions.
    output_shape: A 1-D `Tensor` of shape representing the output shape of the
      deconvolution op.
    rate: A positive int32. The stride with which we sample input values across
      the `height` and `width` dimensions. Equivalently, the rate by which we
      upsample the filter values by inserting zeros across the `height` and
      `width` dimensions. In the literature, the same parameter is sometimes
      called `input stride` or `dilation`.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
      than one, or if the output_shape is not a tensor with 4 elements.
  """
  with ops.name_scope(name, "atrous_conv2d_transpose",
                      [value, filters, output_shape]) as name:
    value = ops.convert_to_tensor(value, name="value")
    filters = ops.convert_to_tensor(filters, name="filters")
    # NHWC layout is assumed, so channel counts live on axis 3 of both tensors.
    if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
      raise ValueError(
          "value's input channels does not match filters' input channels, "
          "{} != {}".format(value.get_shape()[3],
                            filters.get_shape()[3]))
    if rate < 1:
      raise ValueError("rate {} cannot be less than one".format(rate))
    if rate == 1:
      # No dilation requested: degenerate to an ordinary transposed conv.
      return conv2d_transpose(
          value,
          filters,
          output_shape,
          strides=[1, 1, 1, 1],
          padding=padding,
          data_format="NHWC")
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
      raise ValueError("output_shape must have shape (4,), got {}".format(
          output_shape_.get_shape()))
    # Static channel check is only possible when output_shape is a concrete
    # Python sequence (list/ndarray); a Tensor is validated at run time.
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [4] if reached this point.
      if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[3],
                              filters.get_shape()[2]))
    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.
    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      # Handle filters whose shape is unknown during graph creation.
      if filters.get_shape().is_fully_defined():
        filter_shape = filters.get_shape().as_list()
      else:
        filter_shape = array_ops.shape(filters)
      filter_height, filter_width = filter_shape[0], filter_shape[1]
      # Spatial dimensions of the filters and the upsampled filters in which we
      # introduce (rate - 1) zeros between consecutive filter values.
      filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
      filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
      pad_height = filter_height_up - 1
      pad_width = filter_width_up - 1
      # When pad_height (pad_width) is odd, we pad more to bottom (right),
      # following the same convention as conv2d().
      pad_top = pad_height // 2
      pad_bottom = pad_height - pad_top
      pad_left = pad_width // 2
      pad_right = pad_width - pad_left
    elif padding == "VALID":
      pad_top = 0
      pad_bottom = 0
      pad_left = 0
      pad_right = 0
    else:
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    # Spatial extent of the conceptual forward-convolution input.
    in_height = output_shape[1] + pad_top + pad_bottom
    in_width = output_shape[2] + pad_left + pad_right
    # More padding so that rate divides the height and width of the input.
    pad_bottom_extra = (rate - in_height % rate) % rate
    pad_right_extra = (rate - in_width % rate) % rate
    # The paddings argument to space_to_batch is just the extra padding
    # component.
    space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
    value = array_ops.space_to_batch(
        input=value, paddings=space_to_batch_pad, block_size=rate)
    input_sizes = [
        rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
        (in_width + pad_right_extra) // rate, output_shape[3]
    ]
    # The dilated transpose is computed as a stride-1 "VALID" backprop-input
    # over the space-to-batch rearranged tensor.
    value = gen_nn_ops.conv2d_backprop_input(
        input_sizes=input_sizes,
        filter=filters,
        out_backprop=value,
        strides=[1, 1, 1, 1],
        padding="VALID",
        data_format="NHWC")
    # The crops argument to batch_to_space includes both padding components.
    batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
                           [pad_left, pad_right + pad_right_extra]]
    return array_ops.batch_to_space(
        input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export("nn.conv3d", v1=[])
def conv3d_v2(input,  # pylint: disable=redefined-builtin,missing-docstring
              filters,
              strides,
              padding,
              data_format="NDHWC",
              dilations=None,
              name=None):
  # `None` means "no dilation" on all five dimensions.
  effective_dilations = [1, 1, 1, 1, 1] if dilations is None else dilations
  return gen_nn_ops.conv3d(input,  # pylint: disable=redefined-builtin
                           filters,
                           strides,
                           padding,
                           data_format=data_format,
                           dilations=effective_dilations,
                           name=name)
@tf_export(v1=["nn.conv3d"])
def conv3d_v1(  # pylint: disable=missing-docstring,dangerous-default-value
    input,  # pylint: disable=redefined-builtin
    filter=None,  # pylint: disable=redefined-builtin
    strides=None,
    padding=None,
    data_format="NDHWC",
    dilations=[1, 1, 1, 1, 1],
    name=None,
    filters=None):
  # "filter" is deprecated in favor of "filters"; accept either, not both.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  return gen_nn_ops.conv3d(input,
                           filter,
                           strides,
                           padding,
                           data_format=data_format,
                           dilations=dilations,
                           name=name)
# The V2 docstring is derived from the generated op's, with the deprecated
# "filter" argument renamed to "filters"; V1 keeps the op's docstring as-is.
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
    gen_nn_ops.conv3d.__doc__, "filter", "filters")
conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__
@tf_export(v1=["nn.conv3d_transpose"])
def conv3d_transpose(
    value,
    filter=None,  # pylint: disable=redefined-builtin
    output_shape=None,
    strides=None,
    padding="SAME",
    data_format="NDHWC",
    name=None,
    input=None,  # pylint: disable=redefined-builtin
    filters=None,
    dilations=None):
  """The transpose of `conv3d`.

  Often called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), this is
  really the transpose (gradient) of `conv3d`, not a true deconvolution.

  Args:
    value: A 5-D `Tensor` of type `float` and shape
      `[batch, depth, height, width, in_channels]`.
    filter: A 5-D `Tensor` with the same type as `value` and shape
      `[depth, height, width, output_channels, in_channels]`; its
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` giving the output shape of the
      deconvolution op.
    strides: A list of ints. The stride of the sliding window for each
      dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout
      of the input and output tensors. Defaults to `'NDHWC'`.
    name: Optional name for the returned tensor.
    input: Alias of value.
    filters: Alias of filter.
    dilations: An int or list of `ints` of length `1`, `3` or `5`, defaulting
      to 1. Per-dimension dilation factor for `input`; a single value is
      replicated over `D`, `H` and `W`, and `N`/`C` default to 1. A factor
      k > 1 leaves k-1 skipped cells between filter elements. Batch and depth
      dilations must be 1.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  # Resolve the deprecated aliases before delegating to the V2 endpoint.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  value = deprecated_argument_lookup("input", input, "value", value)
  return conv3d_transpose_v2(
      value,
      filter,
      output_shape,
      strides,
      padding=padding,
      data_format=data_format,
      dilations=dilations,
      name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv3d_transpose", v1=[])
def conv3d_transpose_v2(input,
                        filters,
                        output_shape,
                        strides,
                        padding="SAME",
                        data_format="NDHWC",
                        dilations=None,
                        name=None):
  """The transpose of `conv3d`.

  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `conv3d` rather than an actual
  deconvolution.

  Args:
    input: A 5-D `Tensor` of type `float` and shape `[batch, depth, height,
      width, in_channels]` for `NDHWC` data format or `[batch, in_channels,
      depth, height, width]` for `NCDHW` data format.
    filters: A 5-D `Tensor` with the same type as `input` and shape `[depth,
      height, width, output_channels, in_channels]`. `filters`' `in_channels`
      dimension must match that of `input`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: An int or list of `ints` that has length `1`, `3` or `5`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `D`, `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. The dimension order is
      determined by the value of `data_format`, see below for details.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NDHWC' and 'NCDHW' are supported.
    dilations: An int or list of `ints` that has length `1`, `3` or `5`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `D`, `H` and `W` dimension.
      By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details. Dilations in the batch and depth dimensions if a 5-d tensor
      must be 1.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `input`.
  """
  # BUGFIX: the name-scope values list previously referenced the builtin
  # `filter` (this function's parameter is `filters`), passing a function
  # object instead of the filter tensor.
  with ops.name_scope(name, "conv3d_transpose",
                      [input, filters, output_shape]) as name:
    if data_format is None:
      data_format = "NDHWC"
    # Channel axis position decides where the implicit 1s go when expanding
    # scalar/3-element strides and dilations to length 5.
    channel_index = 1 if data_format.startswith("NC") else 4
    strides = _get_sequence(strides, 3, channel_index, "strides")
    dilations = _get_sequence(dilations, 3, channel_index, "dilations")
    # conv3d_transpose is the input-gradient of a forward conv3d whose input
    # has shape `output_shape`.
    return gen_nn_ops.conv3d_backprop_input_v2(
        input_sizes=output_shape,
        filter=filters,
        out_backprop=input,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
  """Adds `bias` to `value`.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
  case where both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    data_format: A string. 'N...C' and 'NC...' are supported.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
    if data_format is not None:
      # Map the generic "NC..."/"N...C" layout specs onto the concrete
      # "NCHW"/"NHWC" strings accepted by the op. The "NC" prefix is checked
      # first, so e.g. "NC" itself canonicalizes to "NCHW".
      if data_format.startswith("NC"):
        data_format = "NCHW"
      elif data_format.startswith("N") and data_format.endswith("C"):
        data_format = "NHWC"
      else:
        raise ValueError("data_format must be of the form `N...C` or `NC...`")
    # In eager mode the op handles conversion itself; in graph mode convert
    # here so `bias` inherits `value`'s dtype.
    if not context.executing_eagerly():
      value = ops.convert_to_tensor(value, name="input")
      bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value`.

  This is a deprecated version of bias_add and will soon to be removed.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
  case where both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    # Convert here so `bias` inherits `value`'s dtype; unlike bias_add, the
    # V1 op takes no data_format argument.
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops.bias_add_v1(value, bias, name=name)
@tf_export(v1=["nn.crelu"])
def crelu(features, name=None, axis=-1):
  """Computes Concatenated ReLU.

  Applies `relu` to the concatenation of `features` with its negation, keeping
  both the positive and the negative part of the activation. As a result this
  non-linearity doubles the depth of the activations.

  Source: [Understanding and Improving Convolutional Neural Networks via
  Concatenated Rectified Linear Units. W. Shang, et
  al.](https://arxiv.org/abs/1603.05201)

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`,
      `uint8`, `int16`, or `int8`.
    name: A name for the operation (optional).
    axis: The axis that the output values are concatenated along. Default
      is -1.

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "CRelu", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    mirrored = array_ops.concat([features, -features], axis, name=name)
    return gen_nn_ops.relu(mirrored)
@tf_export("nn.crelu", v1=[])
def crelu_v2(features, axis=-1, name=None):
  # The V2 endpoint only reorders the keyword parameters; behavior and
  # documentation are identical to `crelu`.
  return crelu(features, name=name, axis=axis)
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
def relu6(features, name=None):
  """Computes Rectified Linear 6: `min(max(features, 0), 6)`.

  Source: [Convolutional Deep Belief Networks on CIFAR-10. A.
  Krizhevsky](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`,
      `uint8`, `int16`, or `int8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "Relu6", [features]) as name:
    tensor = ops.convert_to_tensor(features, name="features")
    return gen_nn_ops.relu6(tensor, name=name)
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
  """Compute the Leaky ReLU activation function.

  "Rectifier Nonlinearities Improve Neural Network Acoustic Models"
  AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013
  https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf

  Args:
    features: A `Tensor` representing preactivation values. Must be one of
      the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
    alpha: Slope of the activation function at x < 0.
    name: A name for the operation (optional).

  Returns:
    The activation value.
  """
  with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
    features = ops.convert_to_tensor(features, name="features")
    # Promote integer inputs to float before applying the activation.
    if features.dtype.is_integer:
      features = math_ops.to_float(features)
    # Past the forward-compatibility horizon, use the fused LeakyRelu kernel.
    if compat.forward_compatible(2018, 11, 1):
      # Unwrap 0-d numpy arrays to a Python scalar for the op's `alpha` attr.
      if isinstance(alpha, np.ndarray):
        alpha = alpha.item()
      return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
    # Fallback composition, `max(alpha * x, x)`, for older graph consumers.
    alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
    return math_ops.maximum(alpha * features, features, name=name)
def _flatten_outer_dims(logits):
  """Reshapes `logits` to 2-D, collapsing all leading dims into one.

  The last dimension is preserved, so the result has shape
  `[prod(outer_dims), last_dim]`.
  """
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
  # In graph mode, propagate a static shape when every leading dim is known.
  if not context.executing_eagerly():
    static_shape = logits.get_shape()
    if static_shape is not None and static_shape.dims is not None:
      dims = static_shape.as_list()
      outer_dims = dims[:-1]
      if all(d is not None for d in outer_dims):
        flattened = 1
        for d in outer_dims:
          flattened *= d
        output.set_shape([flattened, dims[-1]])
  return output
def _softmax(logits, compute_op, dim=-1, name=None):
  """Helper function for softmax and log_softmax.

  It reshapes and transposes the input logits into a 2-D Tensor and then invokes
  the tf.nn._softmax or tf.nn._log_softmax function. The output would be
  transposed and reshaped back.

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax
    dim: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
      dimension of `logits`.
  """

  def _swap_axis(logits, dim_index, last_index, name=None):
    """Swaps logits's dim_index and last_index."""
    return array_ops.transpose(
        logits,
        array_ops.concat([
            math_ops.range(dim_index), [last_index],
            math_ops.range(dim_index + 1, last_index), [dim_index]
        ], 0),
        name=name)

  logits = ops.convert_to_tensor(logits)
  # We need its original shape for shape inference.
  shape = logits.get_shape()
  # Fast path: softmax over the last dimension needs no transpose. Use `==`
  # rather than the previous `dim is -1`: identity comparison of int
  # literals only works because CPython caches small ints and emits a
  # SyntaxWarning on modern Python. A symbolic (Tensor) `dim` can never be
  # statically known to be the last dimension, so it takes the slow path.
  if isinstance(dim, ops.Tensor):
    is_last_dim = False
  else:
    is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
  if is_last_dim:
    return compute_op(logits, name=name)
  # Validate a statically-known `dim` against the rank of `logits`.
  dim_val = dim
  if isinstance(dim, ops.Tensor):
    dim_val = tensor_util.constant_value(dim)
  if dim_val is not None and (dim_val < -shape.ndims or dim_val >= shape.ndims):
    raise errors_impl.InvalidArgumentError(
        None, None,
        "Dimension (%d) must be in the range [%d, %d) where %d is the number of"
        " dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
                                       shape.ndims))
  # If dim is not the last dimension, we have to do a transpose so that we can
  # still perform softmax on its last dimension.
  # In case dim is negative (and is not last dimension -1), add shape.ndims
  ndims = array_ops.rank(logits)
  if not isinstance(dim, ops.Tensor):
    if dim < 0:
      dim += ndims
  else:
    dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)
  # Swap logits' dimension of dim and its last dimension.
  input_rank = array_ops.rank(logits)
  dim_axis = dim % shape.ndims
  logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))
  # Do the actual softmax on its last dimension.
  output = compute_op(logits)
  # Undo the transpose so the result lines up with the input layout.
  output = _swap_axis(
      output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
  # Make shape inference work since transpose may erase its static shape.
  output.set_shape(shape)
  return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
  """Computes softmax activations (v1 endpoint with deprecated `dim` alias).

  Equivalent to:

      softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for `axis`.

  Returns:
    A `Tensor`. Has the same type and shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # Resolve the deprecated alias first, then default to the last dimension.
  resolved_axis = deprecation.deprecated_argument_lookup("axis", axis, "dim",
                                                         dim)
  if resolved_axis is None:
    resolved_axis = -1
  return _softmax(logits, gen_nn_ops.softmax, resolved_axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
def softmax_v2(logits, axis=None, name=None):
  """Computes softmax activations.

  Equivalent to:

      softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type and shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # A None axis means the last dimension.
  return _softmax(logits, gen_nn_ops.softmax, -1 if axis is None else axis,
                  name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
  """Computes log softmax activations (v1 endpoint with deprecated `dim`).

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), axis))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for `axis`.

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # Resolve the deprecated alias first, then default to the last dimension.
  resolved_axis = deprecation.deprecated_argument_lookup("axis", axis, "dim",
                                                         dim)
  if resolved_axis is None:
    resolved_axis = -1
  return _softmax(logits, gen_nn_ops.log_softmax, resolved_axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
def log_softmax_v2(logits, axis=None, name=None):
  """Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), axis))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # A None axis means the last dimension.
  return _softmax(logits, gen_nn_ops.log_softmax, -1 if axis is None else axis,
                  name)
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Use this loss for discrete classification tasks in which the classes are
  mutually exclusive (each entry is in exactly one class). For example, each
  CIFAR-10 image is labeled with one and only one label: an image can be a
  dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be. All that is required is that each row of `labels` is a valid
  probability distribution. If they are not, the computation of the gradient
  will be incorrect.

  If using exclusive `labels` (wherein one and only one class is true at a
  time), see `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the output
  of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with the
  `axis` argument specifying the class dimension. `logits` and `labels` must
  have the same dtype (either `float16`, `float32`, or `float64`).

  Backpropagation will happen into both `logits` and `labels`. To disallow
  backpropagation into `labels`, pass label tensors through
  `tf.stop_gradient` before feeding it to this function.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    labels: Each vector along the class dimension should hold a valid
      probability distribution e.g. for the case in which labels are of shape
      `[batch_size, num_classes]`, each row of `labels[i]` must be a valid
      probability distribution.
    logits: Unscaled log probabilities.
    axis: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it does
    not have the last dimension of `labels`.
  """
  # All the real work is shared with the v1 helper, which additionally
  # accepts the deprecated `dim` alias.
  loss = softmax_cross_entropy_with_logits_v2_helper(
      labels=labels, logits=logits, axis=axis, name=name)
  return loss
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
    labels, logits, axis=None, name=None, dim=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be. All that is required is that each row of `labels` is
  a valid probability distribution. If they are not, the computation of the
  gradient will be incorrect.

  If using exclusive `labels` (wherein one and only
  one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with
  the `axis` argument specifying the class dimension.

  `logits` and `labels` must have the same dtype (either `float16`, `float32`,
  or `float64`).

  Backpropagation will happen into both `logits` and `labels`. To disallow
  backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
  before feeding it to this function.

  **Note that to avoid confusion, it is required to pass only named arguments to
  this function.**

  Args:
    labels: Each vector along the class dimension should hold a valid
      probability distribution e.g. for the case in which labels are of shape
      `[batch_size, num_classes]`, each row of `labels[i]` must be a valid
      probability distribution.
    logits: Unscaled log probabilities.
    axis: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for axis.

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it does
    not have the last dimension of `labels`.
  """
  # TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
  # could break users who call this with bad labels, but disregard the bad
  # results.
  # Resolve the deprecated `dim` alias; `del dim` makes later accidental use
  # of the stale name an error.
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  del dim
  if axis is None:
    axis = -1
  with ops.name_scope(name, "softmax_cross_entropy_with_logits",
                      [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    # Compute in float32 when logits are float16/bfloat16; the loss is cast
    # back to the original dtype before returning (see the end of the
    # function).
    convert_to_float32 = (
        logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
    precise_logits = math_ops.cast(
        logits, dtypes.float32) if convert_to_float32 else logits
    # labels and logits must be of the same type
    labels = math_ops.cast(labels, precise_logits.dtype)
    input_rank = array_ops.rank(precise_logits)
    # For shape inference.
    shape = logits.get_shape()
    # Move the dim to the end if dim is not the last dimension.
    if axis != -1:
      # Transposes `tensor` so that dimension `dim_index` becomes the last
      # one, preserving the relative order of all other dimensions.
      def _move_dim_to_end(tensor, dim_index, rank):
        return array_ops.transpose(
            tensor,
            array_ops.concat([
                math_ops.range(dim_index),
                math_ops.range(dim_index + 1, rank), [dim_index]
            ], 0))
      precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
      labels = _move_dim_to_end(labels, axis, input_rank)
    input_shape = array_ops.shape(precise_logits)
    # Make precise_logits and labels into matrices.
    precise_logits = _flatten_outer_dims(precise_logits)
    labels = _flatten_outer_dims(labels)
    # Do the actual op computation.
    # The second output tensor contains the gradients. We use it in
    # _CrossEntropyGrad() in nn_grad but not here.
    cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
        precise_logits, labels, name=name)
    # The output cost shape should be the input minus axis.
    output_shape = array_ops.slice(input_shape, [0],
                                   [math_ops.subtract(input_rank, 1)])
    cost = array_ops.reshape(cost, output_shape)
    # Make shape inference work since reshape and transpose may erase its static
    # shape.
    if not context.executing_eagerly(
    ) and shape is not None and shape.dims is not None:
      shape = shape.as_list()
      # Drop the class dimension from the static shape (negative `axis`
      # indexes from the end, which `del` on a list supports).
      del shape[axis]
      cost.set_shape(shape)
    if convert_to_float32:
      # Cast back to the caller's half-precision dtype.
      return math_ops.cast(cost, logits.dtype)
    else:
      return cost
# Deprecation notice attached to the v1 `softmax_cross_entropy_with_logits`
# below via the `deprecation.deprecated` decorator.
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    dim=-1,
    name=None,
    axis=None):
  """Computes softmax cross entropy between `logits` and `labels` (v1).

  Measures the probability error in discrete classification tasks in which
  the classes are mutually exclusive (each entry is in exactly one class).
  For example, each CIFAR-10 image is labeled with one and only one label:
  an image can be a dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be. All that is required is that each row of `labels` is a valid
  probability distribution. If they are not, the computation of the gradient
  will be incorrect.

  If using exclusive `labels` (wherein one and only one class is true at a
  time), see `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the output
  of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with the
  `dim` argument specifying the class dimension.

  Backpropagation will happen only into `logits`. To calculate a cross
  entropy loss that allows backpropagation into both `logits` and `labels`,
  see `tf.nn.softmax_cross_entropy_with_logits_v2`.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: Each vector along the class dimension should hold a valid
      probability distribution e.g. for the case in which labels are of shape
      `[batch_size, num_classes]`, each row of `labels[i]` must be a valid
      probability distribution.
    logits: Unscaled log probabilities.
    dim: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).
    axis: Alias for dim.

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it does
    not have the last dimension of `labels`.
  """
  class_axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
                    logits)
  with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
                      [logits, labels]) as name:
    # Blocking gradient flow into the labels is the defining difference from
    # the v2 variant, which this function otherwise delegates to.
    frozen_labels = array_ops.stop_gradient(labels,
                                            name="labels_stop_gradient")
    return softmax_cross_entropy_with_logits_v2(
        labels=frozen_labels, logits=logits, axis=class_axis, name=name)
@tf_export("nn.sparse_softmax_cross_entropy_with_logits")
def sparse_softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** For this operation, the probability of a given label is considered
  exclusive. That is, soft classes are not allowed, and the `labels` vector
  must provide a single specific index for the true class for each row of
  `logits` (each minibatch entry). For soft softmax classification with
  a probability distribution for each entry, see
  `softmax_cross_entropy_with_logits_v2`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits of shape
  `[batch_size, num_classes]` and have labels of shape
  `[batch_size]`, but higher dimensions are supported, in which
  case the `dim`-th dimension is assumed to be of size `num_classes`.
  `logits` must have the dtype of `float16`, `float32`, or `float64`, and
  `labels` must have the dtype of `int32` or `int64`.

  **Note that to avoid confusion, it is required to pass only named arguments to
  this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
      `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
      must be an index in `[0, num_classes)`. Other values will raise an
      exception when this op is run on CPU, and return `NaN` for corresponding
      loss and gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
      `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `labels` and of the same type as `logits`
    with the softmax cross entropy loss.

  Raises:
    ValueError: If logits are scalars (need to have rank >= 1) or if the rank
      of the labels is not equal to the rank of the logits minus one.
  """
  _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
                    labels, logits)
  # TODO(pcmurray) Raise an error when the label is not an index in
  # [0, num_classes). Note: This could break users who call this with bad
  # labels, but disregard the bad results.
  # Reshape logits and labels to rank 2.
  with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
                      [labels, logits]):
    labels = ops.convert_to_tensor(labels)
    logits = ops.convert_to_tensor(logits)
    # float16 logits are upcast to float32 here; the loss is cast back to
    # float16 before returning (see below).
    precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
        logits.dtype) == dtypes.float16) else logits
    # Store label shape for result later.
    labels_static_shape = labels.get_shape()
    labels_shape = array_ops.shape(labels)
    static_shapes_fully_defined = (
        labels_static_shape.is_fully_defined() and
        logits.get_shape()[:-1].is_fully_defined())
    # Static (graph-construction-time) validation of ranks and shapes; the
    # dynamic check further down only runs when these are not fully known.
    if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
      raise ValueError(
          "Logits cannot be scalars - received shape %s." % logits.get_shape())
    if logits.get_shape().ndims is not None and (
        labels_static_shape.ndims is not None and
        labels_static_shape.ndims != logits.get_shape().ndims - 1):
      raise ValueError("Rank mismatch: Rank of labels (received %s) should "
                       "equal rank of logits minus 1 (received %s)." %
                       (labels_static_shape.ndims, logits.get_shape().ndims))
    if (static_shapes_fully_defined and
        labels_static_shape != logits.get_shape()[:-1]):
      raise ValueError("Shape mismatch: The shape of labels (received %s) "
                       "should equal the shape of logits except for the last "
                       "dimension (received %s)." % (labels_static_shape,
                                                     logits.get_shape()))
    # Check if no reshapes are required.
    if logits.get_shape().ndims == 2:
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
    # Perform a check of the dynamic shapes if the static shapes are not fully
    # defined.
    shape_checks = []
    if not static_shapes_fully_defined:
      shape_checks.append(
          check_ops.assert_equal(
              array_ops.shape(labels),
              array_ops.shape(logits)[:-1]))
    with ops.control_dependencies(shape_checks):
      # Reshape logits to 2 dim, labels to 1 dim.
      num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
      precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
      labels = array_ops.reshape(labels, [-1])
      # The second output tensor contains the gradients. We use it in
      # _CrossEntropyGrad() in nn_grad but not here.
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      # Restore the caller's label shape (dynamic and, when known, static).
      cost = array_ops.reshape(cost, labels_shape)
      cost.set_shape(labels_static_shape)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
@tf_export("nn.avg_pool")
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: A list or tuple of 4 ints. The size of the window for each dimension
      of the input tensor.
    strides: A list or tuple of 4 ints. The stride of the sliding window for
      each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool", [value]) as name:
    pool_input = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.avg_pool(
        pool_input, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool", v1=["nn.max_pool_v2"])
def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None):
  """Performs the max pooling on the input.

  Dispatches to the 1-D, 2-D or 3-D max-pool op based on the number of
  spatial dimensions of `input`.

  Args:
    input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
      [num_channels]` if `data_format` does not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC". Pooling happens over the spatial dimensions only.
    ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
      of the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. Specifies the channel dimension. For N=1 it can be
      either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default)
      or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
    name: Optional name for the operation.

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  # Infer the number of spatial dimensions from the input rank, falling back
  # to the data format string.
  if input.shape is not None:
    num_spatial_dims = len(input.shape) - 2
  elif data_format is not None:
    num_spatial_dims = len(data_format) - 2
  else:
    raise ValueError(
        "The input must have a rank or a data format must be given.")
  if not 1 <= num_spatial_dims <= 3:
    raise ValueError(
        "Input tensor must be of rank 3, 4 or 5 but was {}.".format(
            num_spatial_dims + 2))
  if data_format is None:
    channel_index = num_spatial_dims + 1
  else:
    channel_index = 1 if data_format.startswith("NC") else num_spatial_dims + 1
  ksize = _get_sequence(ksize, num_spatial_dims, channel_index, "ksize")
  strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")
  if num_spatial_dims == 1:
    op = max_pool1d
  elif num_spatial_dims == 2:
    op = gen_nn_ops.max_pool
  else:
    op = gen_nn_ops.max_pool3d
  return op(
      input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin
@tf_export(v1=["nn.max_pool"])
def max_pool(value,
             ksize,
             strides,
             padding,
             data_format="NHWC",
             name=None,
             input=None):  # pylint: disable=redefined-builtin
  """Performs the max pooling on the input (v1 endpoint).

  Args:
    value: A 4-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`.
      The size of the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`.
      The stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
    name: Optional name for the operation.
    input: Alias for value.

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  # Honor the old `input` spelling while steering callers to `value`.
  value = deprecation.deprecated_argument_lookup("input", input, "value", value)
  with ops.name_scope(name, "MaxPool", [value]) as name:
    if data_format is None:
      data_format = "NHWC"
    channels_axis = 1 if data_format.startswith("NC") else 3
    return gen_nn_ops.max_pool(
        value,
        ksize=_get_sequence(ksize, 2, channels_axis, "ksize"),
        strides=_get_sequence(strides, 2, channels_axis, "strides"),
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool1d")
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
  """Performs the max pooling on the input.

  Note internally this op reshapes and uses the underlying 2d operation.

  Args:
    input: A 3-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1` or `3`. The size of the
      window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1` or `3`. The stride of
      the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool1d", [input]) as name:
    if data_format is None:
      data_format = "NWC"
    channel_index = 1 if data_format.startswith("NC") else 2
    ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
    strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
    # BUG FIX: compute the axis to expand *before* rewriting `data_format`.
    # The previous code rewrote `data_format` to "NHWC"/"NCHW" first, so the
    # comparison against "NWC" could never be true and channels-last inputs
    # were expanded at axis 2, pooling over the inserted singleton dimension
    # instead of the width dimension.
    expanding_dim = 1 if data_format == "NWC" else 2
    data_format = "NHWC" if data_format == "NWC" else "NCHW"
    # Insert a singleton H dimension so the 2-D max-pool kernel can be used.
    input = array_ops.expand_dims_v2(input, expanding_dim)
    result = gen_nn_ops.max_pool(
        input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
    # Drop the singleton dimension to restore a 3-D result.
    return array_ops.squeeze(result, expanding_dim)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool2d")
def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the max pooling on the input.

  Args:
    input: A 4-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
      the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool2d", [input]) as name:
    if data_format is None:
      data_format = "NHWC"
    channels_axis = 1 if data_format.startswith("NC") else 3
    return gen_nn_ops.max_pool(
        input,
        ksize=_get_sequence(ksize, 2, channels_axis, "ksize"),
        strides=_get_sequence(strides, 2, channels_axis, "strides"),
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool3d")
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
  """Performs the max pooling on the input.

  Args:
    input: A 5-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
      the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `3` or `5`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
      The data format of the input and output data. With the default format
      "NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
      in_width, in_channels]. Alternatively, the format could be "NCDHW", the
      data storage order is: [batch, in_channels, in_depth, in_height,
      in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool3D", [input]) as name:
    if data_format is None:
      data_format = "NDHWC"
    channels_axis = 1 if data_format.startswith("NC") else 4
    return gen_nn_ops.max_pool3d(
        input,
        ksize=_get_sequence(ksize, 3, channels_axis, "ksize"),
        strides=_get_sequence(strides, 3, channels_axis, "strides"),
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
def max_pool_with_argmax_v2(input,
                            ksize,
                            strides,
                            padding,
                            data_format="NHWC",
                            output_dtype=dtypes.int64,
                            name=None):
  """Performs max pooling on the input and outputs both max values and indices.

  The indices in `argmax` are flattened, so that a maximum value at position
  `[b, y, x, c]` becomes flattened index
  `((b * height + y) * width + x) * channels + c`.

  The indices returned are always in `[0, height) x [0, width)` before
  flattening, even if padding is involved and the mathematically correct answer
  is outside (either negative or too large). This is a bug, but fixing it is
  difficult to do in a safe backwards compatible way, especially due to
  flattening.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
      `uint32`, `uint64`.
      4-D with shape `[batch, height, width, channels]`. Input to pool over.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`.
      The size of the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
      `"NHWC"`.
      Specify the data format of the input and output data.
    output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
      Defaults to `tf.int64`.
      The dtype of the returned argmax tensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, argmax).

    output: A `Tensor`. Has the same type as `input`.
    argmax: A `Tensor` of type `output_dtype`.
  """
  # The underlying kernel only supports the channels-last layout.
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")
  return gen_nn_ops.max_pool_with_argmax(
      input=input,
      ksize=_get_sequence(ksize, 2, 3, "ksize"),
      strides=_get_sequence(strides, 2, 3, "strides"),
      padding=padding,
      Targmax=output_dtype,
      name=name)
@tf_export(v1=["nn.max_pool_with_argmax"])
def max_pool_with_argmax_v1(input, # pylint: disable=missing-docstring,invalid-name
                            ksize,
                            strides,
                            padding,
                            data_format="NHWC",
                            Targmax=None, # pylint: disable=invalid-name
                            name=None,
                            output_dtype=None):
  # The underlying kernel only implements the channels-last layout.
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")
  # `Targmax` is the legacy spelling of `output_dtype`; resolve whichever
  # the caller supplied, then fall back to the kernel default of int64.
  argmax_dtype = deprecated_argument_lookup(
      "output_dtype", output_dtype, "Targmax", Targmax)
  if argmax_dtype is None:
    argmax_dtype = dtypes.int64
  return gen_nn_ops.max_pool_with_argmax(
      input, ksize, strides, padding, argmax_dtype, name)
max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__
# pylint: enable=redefined-builtin
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
  """Calculates the compute resources needed for Conv2D.

  Counts two flops (one multiply-add) per kernel tap per input channel for
  every output element.
  """
  shape_of = graph_util.tensor_shape_from_node_def_name
  input_shape = shape_of(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = shape_of(graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = shape_of(graph, node.name)
  output_shape.assert_is_fully_defined()
  kernel_height = int(filter_shape[0])
  kernel_width = int(filter_shape[1])
  in_depth = int(filter_shape[2])
  num_outputs = np.prod(output_shape.as_list(), dtype=np.int64)
  return ops.OpStats(
      "flops",
      (num_outputs * in_depth * kernel_height * kernel_width * 2))
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
  """Calculates the compute resources needed for DepthwiseConv2dNative.

  Depthwise convolution applies one filter per channel, so the estimate is
  two flops (one multiply-add) per kernel tap per output element, with no
  input-depth factor.
  """
  shape_of = graph_util.tensor_shape_from_node_def_name
  input_shape = shape_of(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = shape_of(graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = shape_of(graph, node.name)
  output_shape.assert_is_fully_defined()
  kernel_height = int(filter_shape[0])
  kernel_width = int(filter_shape[1])
  num_outputs = np.prod(output_shape.as_list(), dtype=np.int64)
  return ops.OpStats("flops", (num_outputs * kernel_height * kernel_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd: one add per input element."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  return ops.OpStats("flops", np.prod(input_shape.as_list()))
@tf_export(v1=["nn.xw_plus_b"])
def xw_plus_b(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes `matmul(x, weights) + biases`.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units.
    weights: a 2D tensor.  Dimensions typically: in_units, out_units.
    biases: a 1D tensor.  Dimensions: out_units.
    name: A name for the operation (optional).  If not specified,
      "xw_plus_b" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # The bias add happens under the same name scope so the whole
    # expression shows up as one "xw_plus_b" op group in the graph.
    return bias_add(math_ops.matmul(x, weights), biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes `matmul(x, weights) + biases` (deprecated).

  This is a deprecated version of `xw_plus_b` that will soon be removed;
  it differs only in delegating to `bias_add_v1`.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units.
    weights: a 2D tensor.  Dimensions typically: in_units, out_units.
    biases: a 1D tensor.  Dimensions: out_units.
    name: A name for the operation (optional).  If not specified,
      "xw_plus_b_v1" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    return bias_add_v1(math_ops.matmul(x, weights), biases, name=name)
def _get_noise_shape(x, noise_shape):
  """Resolves the dropout noise shape, filling unknown dims from `x`."""
  if noise_shape is None:
    # No shape requested: every element gets an independent keep/drop flag.
    return array_ops.shape(x)

  try:
    # Best effort to interpret the intended static shape.
    static_shape = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    # Not statically interpretable (e.g. a Tensor); let the op handle it.
    # In eager mode any problem surfaces as an exception there.
    return noise_shape

  if x.shape.dims is not None and len(x.shape.dims) == len(static_shape.dims):
    # Same rank: borrow x's known dimension wherever noise_shape left a
    # dimension unspecified.
    merged = [
        x_dim.value
        if noise_dim.value is None and x_dim.value is not None
        else noise_dim.value
        for x_dim, noise_dim in zip(x.shape.dims, static_shape.dims)
    ]
    return tensor_shape.TensorShape(merged)
  return noise_shape
@tf_export(v1=["nn.dropout"])
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):  # pylint: disable=invalid-name
  """Computes dropout.

  For each element of `x`, with probability `rate`, outputs `0`, and
  otherwise scales the input up by `1 / (1-rate)` so the expected sum is
  unchanged.

  By default each element is kept or dropped independently.  If
  `noise_shape` is specified it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`; only dimensions with
  `noise_shape[i] == shape(x)[i]` make independent decisions.

  Args:
    x: A floating point tensor.
    keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    seed: A Python integer.  Used to create random seeds.  See
      `tf.set_random_seed` for behavior.
    name: A name for this operation (optional).
    rate: A scalar `Tensor` with the same type as `x`.  The probability that
      each element of `x` is discarded.

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  if keep_prob is None:
    keep = None
  else:
    try:
      # Translate the legacy probability-of-keeping into a drop rate.
      keep = 1. - keep_prob
    except TypeError:
      raise ValueError("keep_prob must be a floating point number or Tensor "
                       "(got %r)" % keep_prob)
  rate = deprecation.deprecated_argument_lookup(
      "rate", rate,
      "keep_prob", keep)
  if rate is None:
    raise ValueError("You must provide a rate to dropout.")
  return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None): # pylint: disable=invalid-name
  """Computes dropout.

  With probability `rate`, drops elements of `x`. Input that are kept are
  scaled up by `1 / (1 - rate)`, otherwise outputs `0`. The scaling is so that
  the expected sum is unchanged.

  By default, each element is kept or dropped independently. If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
  and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
  kept independently and each row and column will be kept or not kept together.

  Args:
    x: A floating point tensor.
    rate: A scalar `Tensor` with the same type as x. The probability
      that each element is dropped. For example, setting rate=0.1 would drop
      10% of input elements.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    name: A name for this operation (optional).

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not a scalar in `[0, 1)` or if `x` is not a
      floating point tensor.
  """
  with ops.name_scope(name, "dropout", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if not x.dtype.is_floating:
      raise ValueError("x has to be a floating point tensor since it's going to"
                       " be scaled. Got a %s tensor instead." % x.dtype)
    # Static (Python-number) validation of rate; tensor rates are checked
    # for shape compatibility below instead.
    if isinstance(rate, numbers.Real) and not (rate >= 0 and rate < 1):
      raise ValueError("rate must be a scalar tensor or a float in the "
                       "range [0, 1), got %g" % rate)
    # Early return if nothing needs to be dropped.
    if isinstance(rate, numbers.Real) and rate == 0:
      return x
    if context.executing_eagerly():
      # In eager mode the value is available now; skip the op entirely for
      # a concrete zero rate.
      if isinstance(rate, ops.EagerTensor):
        if rate.numpy() == 0:
          return x
    else:
      rate = ops.convert_to_tensor(
          rate, dtype=x.dtype, name="rate")
      rate.get_shape().assert_is_compatible_with(tensor_shape.scalar())
      # Do nothing if we know rate == 0
      if tensor_util.constant_value(rate) == 0:
        return x
    noise_shape = _get_noise_shape(x, noise_shape)
    # Sample a uniform distribution on [0.0, 1.0) and select values larger than
    # rate.
    #
    # NOTE: Random uniform actually can only generate 2^23 floats on [1.0, 2.0)
    # and subtract 1.0.
    random_tensor = random_ops.random_uniform(
        noise_shape, seed=seed, dtype=x.dtype)
    keep_prob = 1 - rate
    scale = 1 / keep_prob
    # NOTE: if (1.0 + rate) - 1 is equal to rate, then we want to consider that
    # float to be selected, hence we use a >= comparison.
    keep_mask = random_tensor >= rate
    # Scale the survivors so the expected sum matches the input's.
    ret = x * scale * math_ops.cast(keep_mask, x.dtype)
    if not context.executing_eagerly():
      # Dropout never changes the shape; preserve the static shape info.
      ret.set_shape(x.get_shape())
    return ret
@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None):  # pylint: disable=redefined-builtin
  """Finds values and indices of the `k` largest entries for the last dimension.

  For a rank-1 `input`, `values[j]` is the `j`-th largest entry of `input`
  and `indices[j]` is its position.  For higher-rank input the top `k`
  entries are computed along the last axis of each slice, so

      values.shape = indices.shape = input.shape[:-1] + [k]

  Ties are broken in favor of the lower index.

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `k`.
    k: 0-D `int32` `Tensor`.  Number of top elements to look for along the
      last dimension (along each row for matrices).
    sorted: If true the resulting `k` elements will be sorted by the values
      in descending order.
    name: Optional name for the operation.

  Returns:
    values: The `k` largest elements along each last dimensional slice.
    indices: The indices of `values` within the last dimension of `input`.
  """
  # Thin wrapper over the generated v2 kernel.
  return gen_nn_ops.top_kv2(input=input, k=k, sorted=sorted, name=name)
def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin
  r"""Finds values of the `n`-th order statistic for the last dimension.

  If the input is a vector (rank-1), finds the entry which is the
  nth-smallest value in the vector and outputs it as a scalar tensor.

  For matrices (resp. higher rank input), computes the entry which is the
  nth-smallest value in each row (resp. vector along the last dimension).
  Thus,

      values.shape = input.shape[:-1]

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `n+1`.
    n: A `Tensor` of type `int32`.
      0-D. Position of sorted vector to select along the last dimension (along
      each row for matrices). Valid range of n is `[0, input.shape[:-1])`
    reverse: An optional `bool`. Defaults to `False`.
      When set to True, find the nth-largest value in the vector and vice
      versa.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
    The `n`-th order statistic along each last dimensional slice.
  """
  return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):   # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input (deprecated version).

  Unlike regular max pooling, the reduction ratio along each pooled
  dimension does not have to be an integer.  Pooling-region boundaries are
  generated (pseudo)randomly but fairly uniformly: with
  `alpha = input_row_length / output_row_length` and `K = floor(alpha)`,
  the boundary sequence starts at 0, ends at `input_row_length`, has
  `output_row_length + 1` entries, and every interval has size K or K+1.
  For details see [Benjamin Graham, Fractional
  Max-Pooling](http://arxiv.org/abs/1412.6071).

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio
      for each dimension of `value`; only the row and col dimensions are
      currently supported and must be >= 1.0.  The first and last elements
      must be 1.0 because pooling on the batch and channels dimensions is
      not allowed (e.g. `[1.0, 1.44, 1.73, 1.0]`).
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      generates the pooling sequence in a pseudorandom rather than random
      fashion (see the paper above for the difference).
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both
      cells.
    deterministic: An optional `bool`.  Deprecated; use
      `fractional_max_pool_v2` instead.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the
      random number generator; otherwise a random seed is used.
    seed2: An optional `int`.  Deprecated; use `fractional_max_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).

    output: Output `Tensor` after fractional max pooling.  Has the same
      type as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Every argument is forwarded untouched to the generated kernel.
  return gen_nn_ops.fractional_max_pool(
      value,
      pooling_ratio,
      pseudo_random,
      overlapping,
      deterministic,
      seed,
      seed2,
      name)
@tf_export("nn.fractional_max_pool", v1=[])
def fractional_max_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  Unlike regular max pooling, the reduction ratio along each pooled
  dimension does not have to be an integer.  Pooling-region boundaries are
  generated (pseudo)randomly but fairly uniformly: with
  `alpha = input_row_length / output_row_length` and `K = floor(alpha)`,
  the boundary sequence starts at 0, ends at `input_row_length`, has
  `output_row_length + 1` entries, and every interval has size K or K+1.
  For details see [Benjamin Graham, Fractional
  Max-Pooling](http://arxiv.org/abs/1412.6071).

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: An int or list of `ints` that has length `1`, `2` or
      `4`.  Pooling ratio for each dimension of `value`; only the row and
      col dimensions are currently supported and must be >= 1.0.  The first
      and last elements must be 1.0 because pooling on the batch and
      channels dimensions is not allowed (e.g. `[1.0, 1.44, 1.73, 1.0]`).
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      generates the pooling sequence in a pseudorandom rather than random
      fashion (see the paper above for the difference).
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both
      cells.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the
      random number generator; otherwise a random seed is used.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).

    output: Output `Tensor` after fractional max pooling.  Has the same
      type as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Normalize int / short-list pooling ratios to the 4-element form.
  pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")

  if seed == 0:
    # A zero seed asks the kernel for non-deterministic sampling.
    return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=False,
                                          seed=0, seed2=0, name=name)
  # Split the user seed into the op's two-seed form for deterministic output.
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic=True,
                                        seed=seed1, seed2=seed2, name=name)
@tf_export(v1=["nn.fractional_avg_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input (deprecated version).

  Fractional average pooling is similar to fractional max pooling in the
  pooling-region generation step; the only difference is that a mean is
  taken instead of a max within each pooling region.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio
      for each dimension of `value`; only the row and col dimensions are
      currently supported and must be >= 1.0.  The first and last elements
      must be 1.0 because pooling on the batch and channels dimensions is
      not allowed (e.g. `[1.0, 1.44, 1.73, 1.0]`).
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      generates the pooling sequence in a pseudorandom rather than random
      fashion.  See [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071) for the difference.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both
      cells.
    deterministic: An optional `bool`.  Deprecated; use
      `fractional_avg_pool_v2` instead.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the
      random number generator; otherwise a random seed is used.
    seed2: An optional `int`.  Deprecated; use `fractional_avg_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).

    output: Output `Tensor` after fractional avg pooling.  Has the same
      type as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Every argument is forwarded untouched to the generated kernel.
  return gen_nn_ops.fractional_avg_pool(
      value,
      pooling_ratio,
      pseudo_random,
      overlapping,
      deterministic,
      seed,
      seed2,
      name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
def fractional_avg_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  Fractional average pooling is similar to Fractional max pooling in the pooling
  region generation step. The only difference is that after pooling regions are
  generated, a mean operation is performed instead of a max operation in each
  pooling region.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`.
      Pooling ratio for each dimension of `value`, currently only supports row
      and col dimension and should be >= 1.0. For example, a valid pooling ratio
      looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0
      because we don't allow pooling on batch and channels dimensions. 1.44 and
      1.73 are pooling ratio on height and width dimensions respectively.
    pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
      generates the pooling sequence in a pseudorandom fashion, otherwise, in a
      random fashion. Check paper [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between
      pseudorandom and random.
    overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
      it means when pooling, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
      twice. The result would be [20, 16] for fractional avg pooling.
    seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
      random number generator is seeded by the given seed. Otherwise it is
      seeded by a random seed.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).

    output: Output `Tensor` after fractional avg pooling. Has the same type as
      `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Normalize `pooling_ratio` exactly as `fractional_max_pool_v2` does, so
  # an int or a 1/2-element sequence is expanded to the 4-element form the
  # kernel expects (previously such inputs were passed through unchanged
  # and rejected by the op).
  pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")

  if seed == 0:
    # Zero seed: request non-deterministic sampling from the kernel.
    return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=False,
                                          seed=0, seed2=0, name=name)
  else:
    # Split the single user-visible seed into the op's two-seed form.
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=True,
                                          seed=seed1, seed2=seed2, name=name)
@tf_export(v1=["nn.conv1d"])
@deprecation.deprecated_arg_values(
    None,
    "`NCHW` for data_format is deprecated, use `NCW` instead",
    warn_once=True,
    data_format="NCHW")
@deprecation.deprecated_arg_values(
    None,
    "`NHWC` for data_format is deprecated, use `NWC` instead",
    warn_once=True,
    data_format="NHWC")
def conv1d(
    value=None,
    filters=None,
    stride=None,
    padding=None,
    use_cudnn_on_gpu=None,
    data_format=None,
    name=None,
    input=None,  # pylint: disable=redefined-builtin
    dilations=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  The input has shape `[batch, in_width, in_channels]` if `data_format` is
  `"NWC"`, or `[batch, in_channels, in_width]` if it is `"NCW"`, and the
  filter / kernel tensor has shape `[filter_width, in_channels,
  out_channels]`.  Internally this op inserts a dummy height-1 dimension
  into the input, reshapes the filter to
  `[1, filter_width, in_channels, out_channels]`, invokes `tf.nn.conv2d`,
  and squeezes the result back to `[batch, out_width, out_channels]`
  (where `out_width` is a function of the stride and padding as in
  conv2d).

  Args:
    value: A 3D `Tensor`.  Must be of type `float16`, `float32`, or
      `float64`.
    filters: A 3D `Tensor`.  Must have the same type as `value`.
    stride: An int or list of `ints` that has length `1` or `3`.  The
      number of entries by which the filter is moved right at each step.
    padding: 'SAME' or 'VALID'
    use_cudnn_on_gpu: An optional `bool`.  Defaults to `True`.
    data_format: An optional `string` from `"NWC", "NCW"`.  Defaults to
      `"NWC"`, the data is stored in the order of
      [batch, in_width, in_channels].  The `"NCW"` format stores data as
      [batch, in_channels, in_width].
    name: A name for the operation (optional).
    input: Alias for value.
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1.  The dilation factor for each dimension of input.  If
      set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension.  Dilations in the batch and depth
      dimensions must be 1.

  Returns:
    A `Tensor`.  Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  value = deprecation.deprecated_argument_lookup("input", input, "value", value)
  with ops.name_scope(name, "conv1d", [value, filters]) as name:
    # Map the 1-D layout onto the equivalent 2-D one and record where the
    # dummy height axis goes and where channels live.
    if data_format is None or data_format in ("NHWC", "NWC"):
      data_format, spatial_start_dim, channel_index = "NHWC", 1, 2
    elif data_format in ("NCHW", "NCW"):
      data_format, spatial_start_dim, channel_index = "NCHW", 2, 1
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    strides = [1] + _get_sequence(stride, 1, channel_index, "stride")
    dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")

    # Lift both operands to 4-D so the 2-D convolution kernel can be reused,
    # then drop the dummy spatial axis from the result.
    result = gen_nn_ops.conv2d(
        array_ops.expand_dims(value, spatial_start_dim),
        array_ops.expand_dims(filters, 0),
        strides,
        padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format,
        dilations=dilations,
        name=name)
    return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
def conv1d_v2(
    input,  # pylint: disable=redefined-builtin
    filters,
    stride,
    padding,
    data_format="NWC",
    dilations=None,
    name=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  The input has shape `[batch, in_width, in_channels]` if `data_format` is
  `"NWC"`, or `[batch, in_channels, in_width]` if it is `"NCW"`, and the
  filter / kernel tensor has shape `[filter_width, in_channels,
  out_channels]`.  Internally this op inserts a dummy height-1 dimension
  into the input, reshapes the filter to
  `[1, filter_width, in_channels, out_channels]`, invokes `tf.nn.conv2d`,
  and squeezes the result back to `[batch, out_width, out_channels]`
  (where `out_width` is a function of the stride and padding as in
  conv2d).

  Args:
    input: A 3D `Tensor`.  Must be of type `float16`, `float32`, or
      `float64`.
    filters: A 3D `Tensor`.  Must have the same type as `input`.
    stride: An int or list of `ints` that has length `1` or `3`.  The
      number of entries by which the filter is moved right at each step.
    padding: 'SAME' or 'VALID'
    data_format: An optional `string` from `"NWC", "NCW"`.  Defaults to
      `"NWC"`, the data is stored in the order of
      [batch, in_width, in_channels].  The `"NCW"` format stores data as
      [batch, in_channels, in_width].
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1.  The dilation factor for each dimension of input.  If
      set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension.  Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  # Delegate to the v1 implementation with cuDNN always enabled.
  return conv1d(
      value=input,
      filters=filters,
      stride=stride,
      padding=padding,
      use_cudnn_on_gpu=True,
      data_format=data_format,
      dilations=dilations,
      name=name)
@tf_export("nn.conv1d_transpose")
def conv1d_transpose(
    input,  # pylint: disable=redefined-builtin
    filters,
    output_shape,
    strides,
    padding="SAME",
    data_format="NWC",
    dilations=None,
    name=None):
  """The transpose of `conv1d`.

  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `conv1d` rather than an actual
  deconvolution.

  Args:
    input: A 3-D `Tensor` of type `float` and shape
      `[batch, in_width, in_channels]` for `NWC` data format or
      `[batch, in_channels, in_width]` for `NCW` data format.
    filters: A 3-D `Tensor` with the same type as `value` and shape
      `[filter_width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` (or list/tuple of ints), containing three
      elements, representing the output shape of the deconvolution op.
    strides: An int or list of `ints` that has length `1` or `3`.  The number of
      entries by which the filter is moved right at each step.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. `'NWC'` and `'NCW'` are supported.
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1. The dilation factor for each dimension of input. If set to
      k > 1, there will be k-1 skipped cells between each filter element on that
      dimension. Dilations in the batch and depth dimensions must be 1.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, if
      `output_shape` is not at 3-element vector, if `padding` is other than
      `'VALID'` or `'SAME'`, or if `data_format` is invalid.
  """
  with ops.name_scope(name, "conv1d_transpose",
                      [input, filters, output_shape]) as name:
    # The format could be either NWC or NCW, map to NHWC or NCHW
    if data_format is None or data_format == "NWC":
      data_format = "NHWC"
      spatial_start_dim = 1
      channel_index = 2
    elif data_format == "NCW":
      data_format = "NCHW"
      spatial_start_dim = 2
      channel_index = 1
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")

    # Reshape the input tensor to [batch, 1, in_width, in_channels]
    strides = [1] + _get_sequence(strides, 1, channel_index, "stride")
    dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")

    input = array_ops.expand_dims(input, spatial_start_dim)
    filters = array_ops.expand_dims(filters, 0)  # pylint: disable=redefined-builtin
    # Insert the dummy height dimension (size 1) into the requested output
    # shape.  A dynamic `Tensor` shape cannot be spliced as a Python list
    # (the previous `list(output_shape)` call failed for graph tensors),
    # so use a concat in that case; static list/tuple shapes keep the
    # original list-splicing path.
    if isinstance(output_shape, ops.Tensor):
      output_shape = array_ops.concat(
          [output_shape[:spatial_start_dim], [1],
           output_shape[spatial_start_dim:]], 0)
    else:
      output_shape = list(output_shape)
      output_shape = (output_shape[:spatial_start_dim] + [1] +
                      output_shape[spatial_start_dim:])

    result = gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape,
        filter=filters,
        out_backprop=input,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
    return array_ops.squeeze(result, spatial_start_dim)
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
  """Calculates the compute resources needed for Dilation2D.

  Args:
    graph: The graph containing the node.
    node: The NodeDef of the Dilation2D op being measured.

  Returns:
    ops.OpStats holding the estimated number of floating point operations.
  """

  def _defined_shape(name):
    # Resolve a tensor shape by name; it must be statically known.
    shape = graph_util.tensor_shape_from_node_def_name(graph, name)
    shape.assert_is_fully_defined()
    return shape

  _defined_shape(node.input[0])  # Input shape must be fully defined too.
  kernel_shape = _defined_shape(node.input[1])
  out_shape = _defined_shape(node.name)
  kernel_height = int(kernel_shape[0])
  kernel_width = int(kernel_shape[1])
  num_outputs = np.prod(out_shape.as_list(), dtype=np.int64)
  # Two ops (one compare/max, one add) per kernel element per output element.
  return ops.OpStats("flops", num_outputs * kernel_height * kernel_width * 2)
@tf_export(v1=["nn.erosion2d"])
def erosion2d(value, kernel, strides, rates, padding, name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.

  The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
  `kernel` tensor has shape `[kernel_height, kernel_width, depth]`; each
  input channel is eroded independently of the others with its own
  structuring function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`, where the spatial dimensions
  depend on the `padding` algorithm. We currently only support the default
  "NHWC" `data_format`.

  In detail, the grayscale morphological 2-D erosion is given by:

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - rates[1] * dy,
                            strides[2] * x - rates[2] * dx,
                            c] -
                      kernel[dy, dx, c]

  Duality: The erosion of `value` by the `kernel` is equal to the negation of
  the dilation of `-value` by the reflected `kernel`.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    kernel: A `Tensor`. Must have the same type as `value`.
      3-D with shape `[kernel_height, kernel_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional). If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor`. Has the same type as `value`.
    4-D with shape `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `kernel`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
    # Erosion is dilation's dual: erode(v, k) == -dilate(-v, reflect(k)).
    reflected_kernel = array_ops.reverse_v2(kernel, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_kernel,
        strides=strides,
        rates=rates,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export("nn.erosion2d", v1=[])
def erosion2d_v2(value,
                 filters,
                 strides,
                 padding,
                 data_format,
                 dilations,
                 name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.

  The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filters` tensor has shape `[filters_height, filters_width, depth]`; each
  input channel is eroded independently of the others with its own
  structuring function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`, where the spatial dimensions
  depend on the `padding` algorithm. We currently only support the default
  "NHWC" `data_format`.

  In detail, the grayscale morphological 2-D erosion is given by:

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - dilations[1] * dy,
                            strides[2] * x - dilations[2] * dx,
                            c] -
                      filters[dy, dx, c]

  Duality: The erosion of `value` by the `filters` is equal to the negation
  of the dilation of `-value` by the reflected `filters`.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor`. Must have the same type as `value`.
      3-D with shape `[filters_height, filters_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    name: A name for the operation (optional). If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor`. Has the same type as `value`.
    4-D with shape `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")

  with ops.name_scope(name, "erosion2d", [value, filters]) as name:
    # Erosion is dilation's dual: erode(v, f) == -dilate(-v, reflect(f)).
    reflected_filters = array_ops.reverse_v2(filters, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_filters,
        strides=strides,
        rates=dilations,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
def in_top_k(predictions, targets, k, name=None):
  r"""Says whether the targets are in the top `K` predictions.

  Outputs a `batch_size` bool array: entry `out[i]` is `true` if the
  prediction for the target class is among the top `k` predictions among
  all predictions for example `i`. Unlike the `TopK` op, ties are treated
  inclusively here: if multiple classes share a prediction value that
  straddles the top-`k` boundary, all of those classes are considered to be
  in the top `k`.

  More formally, let

    \\(predictions_i\\) be the predictions for all classes for example `i`,
    \\(targets_i\\) be the target class for example `i`,
    \\(out_i\\) be the output for example `i`,

  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$

  Args:
    predictions: A `Tensor` of type `float32`.
      A `batch_size` x `classes` tensor.
    targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `batch_size` vector of class ids.
    k: An `int`. Number of top elements to look at for computing precision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
  """
  with ops.name_scope(name, "in_top_k"):
    # InTopKV2 subsumes the original kernel (it also allows a tensor `k`).
    result = gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
    return result
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
def in_top_k_v2(targets, predictions, k, name=None):
  # The v2 API swaps the (predictions, targets) argument order of the v1
  # endpoint; delegate to the v1 implementation with the arguments flipped.
  return in_top_k(predictions, targets, k, name)

# The v2 endpoint shares the v1 documentation (only the arg order differs).
in_top_k_v2.__doc__ = in_top_k.__doc__
# Re-export the generated quantized op wrappers under the v1 `tf.nn`
# namespace; these have no extra Python logic of their own.
tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool)
tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d)
tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x)
tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool)
|
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Constants used in gradient based inference with psl constraints."""

# Specify test data here.
DEFAULT_DATA_PATH = ''

# Per-rule weights for the seven MultiWoZ PSL rules (uniform by default).
# BUG FIX: a stray trailing comma previously turned this into a 1-tuple
# wrapping the list, so indexing by rule number was broken.
MULTIWOZ_RULE_WEIGHTS = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]

# Shared configuration for the MultiWoZ PSL-constrained model/data pipeline.
MULTIWOZ_CONFIG = {
    'data_path': DEFAULT_DATA_PATH,
    'default_seed': 5,
    'batch_size': 128,
    'max_dialog_size': 10,
    'max_utterance_size': 40,
    'greet_words': ['hello', 'hi'],
    'end_words': ['thank', 'thanks'],
    # Dialog-act label -> class id.
    'class_map': {
        'accept': 0,
        'cancel': 1,
        'end': 2,
        'greet': 3,
        'info_question': 4,
        'init_request': 5,
        'insist': 6,
        'second_request': 7,
        'slot_question': 8,
    },
    # Sentinel feature values / feature column indices.
    'contains_word': -1,
    'excludes_word': -2,
    'greet_index': 0,
    'end_index': 1,
}
|
"""RP To-Do entry point script."""
# rptodo/__main__.py
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from rptodo import cli, __app_name__
def main():
    """Run the Typer CLI application under the package's program name."""
    cli.app(prog_name=__app_name__)

if __name__ == '__main__':
    main()
|
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import auto_fp16
from .. import builder
class BaseRecognizer(nn.Module, metaclass=ABCMeta):
    """Base class for recognizers.

    All recognizers should subclass it.
    All subclass should overwrite:

    - Methods:``forward_train``, supporting to forward when training.
    - Methods:``forward_test``, supporting to forward when testing.

    Args:
        backbone (dict): Backbone modules to extract feature.
        cls_head (dict): Classification head to process feature.
        neck (dict | None): Optional neck module between backbone and
            head. Default: None.
        train_cfg (dict | None): Config for training. Default: None.
        test_cfg (dict | None): Config for testing. Default: None.
    """

    def __init__(self,
                 backbone,
                 cls_head,
                 neck=None,
                 train_cfg=None,
                 test_cfg=None):
        super().__init__()
        self.backbone = builder.build_backbone(backbone)
        if neck is not None:
            self.neck = builder.build_neck(neck)
        self.cls_head = builder.build_head(cls_head)

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        # aux_info is the list of tensor names beyond 'imgs' and 'label' which
        # will be used in train_step and val_step, data_batch should contain
        # these tensors
        self.aux_info = []
        if train_cfg is not None and 'aux_info' in train_cfg:
            self.aux_info = train_cfg['aux_info']

        self.init_weights()

        # Flag read by mmcv's @auto_fp16 decorator to enable fp16 casting.
        self.fp16_enabled = False

    def init_weights(self):
        """Initialize the model network weights."""
        self.backbone.init_weights()
        self.cls_head.init_weights()
        if hasattr(self, 'neck'):
            self.neck.init_weights()

    @auto_fp16()
    def extract_feat(self, imgs):
        """Extract features through a backbone.

        Args:
            imgs (torch.Tensor): The input images.

        Returns:
            torch.tensor: The extracted features.
        """
        x = self.backbone(imgs)
        return x

    def average_clip(self, cls_score, num_segs=1):
        """Averaging class score over multiple clips.

        Using different averaging types ('score' or 'prob' or None,
        which defined in test_cfg) to computed the final averaged
        class score.

        Args:
            cls_score (torch.Tensor): Class score to be averaged. Its batch
                dimension must be divisible by ``num_segs``.
            num_segs (int): Number of clips per sample. Default: 1.

        Returns:
            torch.Tensor: Averaged class score.

        Raises:
            KeyError: If ``test_cfg`` has no 'average_clips' entry.
            ValueError: If the averaging mode is not 'score', 'prob' or None.
        """
        if 'average_clips' not in self.test_cfg.keys():
            raise KeyError('"average_clips" must defined in test_cfg\'s keys')

        average_clips = self.test_cfg['average_clips']
        if average_clips not in ['score', 'prob', None]:
            raise ValueError(f'{average_clips} is not supported. '
                             f'Currently supported ones are '
                             f'["score", "prob", None]')

        if average_clips is None:
            return cls_score

        # Regroup so each sample's clips share the middle dimension.
        batch_size = cls_score.shape[0]
        cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)

        if average_clips == 'prob':
            # Average in probability space (softmax over classes first).
            cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
        elif average_clips == 'score':
            # Average the raw scores/logits directly.
            cls_score = cls_score.mean(dim=1)

        return cls_score

    @abstractmethod
    def forward_train(self, imgs, labels, **kwargs):
        """Defines the computation performed at every call when training."""
        pass

    @abstractmethod
    def forward_test(self, imgs):
        """Defines the computation performed at every call when evaluation and
        testing."""
        pass

    @staticmethod
    def _parse_losses(losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
                which may be a weighted sum of all losses, log_vars contains
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')

        # Only entries whose key contains 'loss' contribute to the total.
        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)

        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            # Convert to plain Python floats for the logger.
            log_vars[loss_name] = loss_value.item()

        return loss, log_vars

    def forward(self, imgs, label=None, return_loss=True, **kwargs):
        """Define the computation performed at every call.

        Dispatches to ``forward_train`` when ``return_loss`` is True
        (``label`` is then required) and to ``forward_test`` otherwise.
        """
        if return_loss:
            if label is None:
                raise ValueError('Label should not be None.')
            return self.forward_train(imgs, label, **kwargs)
        else:
            return self.forward_test(imgs, **kwargs)

    def train_step(self, data_batch, optimizer, **kwargs):
        """The iteration step during training.

        This method defines an iteration step during training, except for the
        back propagation and optimizer updating, which are done in an optimizer
        hook. Note that in some complicated cases or models, the whole process
        including back propagation and optimizer updating is also defined in
        this method, such as GAN.

        Args:
            data_batch (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.

        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
                ``num_samples``.
                ``loss`` is a tensor for back propagation, which can be a
                weighted sum of multiple losses.
                ``log_vars`` contains all the variables to be sent to the
                logger.
                ``num_samples`` indicates the batch size (when the model is
                DDP, it means the batch size on each GPU), which is used for
                averaging the logs.
        """
        imgs = data_batch['imgs']
        label = data_batch['label']

        # Collect the extra tensors declared in train_cfg['aux_info']; they
        # are forwarded to forward_train as keyword arguments.
        aux_info = {}
        for item in self.aux_info:
            assert item in data_batch
            aux_info[item] = data_batch[item]

        losses = self(imgs, label, return_loss=True, **aux_info)

        loss, log_vars = self._parse_losses(losses)

        # num_samples is the per-GPU batch size; the runner uses it to
        # average the logged values.
        outputs = dict(
            loss=loss,
            log_vars=log_vars,
            num_samples=len(next(iter(data_batch.values()))))

        return outputs

    def val_step(self, data_batch, optimizer, **kwargs):
        """The iteration step during validation.

        This method shares the same signature as :func:`train_step`, but used
        during val epochs. Note that the evaluation after training epochs is
        not implemented with this method, but an evaluation hook.
        """
        imgs = data_batch['imgs']
        label = data_batch['label']

        # Same aux-tensor forwarding as in train_step (no presence assert
        # here in the original code).
        aux_info = {}
        for item in self.aux_info:
            aux_info[item] = data_batch[item]

        losses = self(imgs, label, return_loss=True, **aux_info)

        loss, log_vars = self._parse_losses(losses)

        outputs = dict(
            loss=loss,
            log_vars=log_vars,
            num_samples=len(next(iter(data_batch.values()))))

        return outputs
|
import cudamat as cm
import gpu_lock2 as gpu_lock
import h5py
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
from time import sleep
import pdb
import datetime
import time
import config_pb2
from google.protobuf import text_format
class Param(object):
  """A learnable parameter stored on the GPU, with optimizer state.

  Wraps a cudamat matrix `w_` together with its gradient `dw_`, the momentum
  history `dw_history_` and (optionally) an RMSProp moving average, and knows
  how to initialize, load, save and update itself.
  """

  def __init__(self, w, config=None):
    """Create a parameter.

    Args:
      w: np.ndarray initial value, a tuple shape (allocated uninitialized),
        or an existing cudamat matrix to wrap.
      config: config_pb2.Param proto carrying hyper-parameters and the
        weight-initialization spec. If None, proto defaults are used for the
        hyper-parameters and `w` is kept exactly as passed in.
    """
    if type(w) == np.ndarray:
      self.w_ = cm.CUDAMatrix(w)
    elif type(w) == tuple:
      self.w_ = cm.empty(w)
    else:
      self.w_ = w
    self.dw_ = cm.empty_like(self.w_)
    self.dw_history_ = cm.empty_like(self.w_)
    self.dw_history_.assign(0)
    self.dw_.assign(0)
    self.t_ = 0

    # BUG FIX: config attributes used to be read before the `config is None`
    # check, so the documented default (config=None) crashed with an
    # AttributeError. Fall back to proto defaults for the hyper-parameters
    # and skip explicit weight initialization in that case.
    if config is None:
      init_type = None
      config = config_pb2.Param()
    else:
      init_type = config.init_type

    self.rms_prop_ = config.rms_prop
    self.rms_prop_factor_ = config.rms_prop_factor
    if self.rms_prop_:
      self.rms_prop_history_ = cm.empty_like(self.dw_)
      self.rms_prop_history_.assign(1)

    if init_type is None:
      pass  # Keep `w` as provided.
    elif init_type == config_pb2.Param.CONSTANT:
      self.w_.assign(config.scale)
    elif init_type == config_pb2.Param.GAUSSIAN:
      self.w_.fill_with_randn()
      self.w_.mult(config.scale)
    elif init_type == config_pb2.Param.UNIFORM:
      # Uniform in [-scale, scale].
      self.w_.fill_with_rand()
      self.w_.subtract(0.5)
      self.w_.mult(2 * config.scale)
    elif init_type == config_pb2.Param.LSTM_BIAS:
      # Per-gate biases packed column-wise as
      # [input_gate, forget_gate, input, output_gate].
      init_bias = [config.input_gate_bias, config.forget_gate_bias,
                   config.input_bias, config.output_gate_bias]
      self.w_.reshape((-1, 4))
      for i in range(4):
        self.w_.slice(i, (i + 1)).assign(init_bias[i])
      self.w_.reshape((-1, 1))
    elif init_type == config_pb2.Param.PRETRAINED:
      f = h5py.File(config.file_name)
      mat = f[config.dataset_name].value
      if len(mat.shape) == 1:
        mat = mat.reshape(1, -1)
      assert self.w_.shape == mat.shape
      self.w_.overwrite(mat)
      f.close()
    else:
      raise Exception('Unknown parameter initialization.')

    self.eps_ = config.epsilon
    self.momentum_ = config.momentum
    self.l2_decay_ = config.l2_decay
    self.gradient_clip_ = config.gradient_clip
    self.eps_decay_factor = config.eps_decay_factor
    self.eps_decay_after = config.eps_decay_after

  def __repr__(self):
    return self.w_.asarray().__repr__()

  def __str__(self):
    return self.w_.asarray().__str__()

  def Load(self, f, name):
    """Restore weights and optimizer state from the open h5py file `f`."""
    if name in f.keys():
      self.w_.overwrite(f[name].value)
      self.dw_history_.overwrite(f['%s_grad' % name].value)
      if self.rms_prop_:
        self.rms_prop_history_.overwrite(f['%s_rms_prop' % name].value)
      self.t_ = f.attrs.get('%s_t' % name, 0)
    else:
      # print-as-function works in both Python 2 and 3 for a single value.
      print("%s not found." % name)

  def Save(self, f, name):
    """Write weights and optimizer state into the open h5py file `f`."""
    w_dset = f.create_dataset(name, self.w_.shape, dtype=np.float32)
    w_dset[:, :] = self.w_.asarray()
    w_dset = f.create_dataset('%s_grad' % name, self.dw_history_.shape, dtype=np.float32)
    w_dset[:, :] = self.dw_history_.asarray()
    if self.rms_prop_:
      w_dset = f.create_dataset('%s_rms_prop' % name, self.rms_prop_history_.shape, dtype=np.float32)
      w_dset[:, :] = self.rms_prop_history_.asarray()
    f.attrs.__setitem__('%s_t' % name, self.t_)

  def GetW(self):
    return self.w_

  def GetdW(self):
    return self.dw_

  def Update(self):
    """Apply one momentum / (optional) RMSProp update step to the weights."""
    if self.eps_decay_after > 0:
      eps = self.eps_ * np.power(self.eps_decay_factor, self.t_ / self.eps_decay_after)
    else:
      eps = self.eps_
    self.dw_history_.mult(self.momentum_)
    if self.l2_decay_ > 0:
      self.dw_.add_mult(self.w_, mult=self.l2_decay_)
    if self.rms_prop_:
      self.rms_prop_history_.rms_prop(self.dw_, self.rms_prop_factor_)
      self.dw_.divide(self.rms_prop_history_)
    # BUG FIX: the decayed learning rate `eps` was computed above but
    # `self.eps_` was used here, so eps_decay_* settings had no effect.
    self.dw_history_.add_mult(self.dw_, -eps)
    if self.gradient_clip_ > 0:
      self.dw_history_.upper_bound_mod(self.gradient_clip_)
    self.w_.add(self.dw_history_)
    self.t_ += 1
def ReadDataProto(fname):
  """Parse a config_pb2.Data message from a text-format protobuf file."""
  proto = config_pb2.Data()
  with open(fname, 'r') as pbtxt:
    text_format.Merge(pbtxt.read(), proto)
  return proto
def ReadModelProto(fname):
  """Parse a config_pb2.Model message from a text-format protobuf file."""
  proto = config_pb2.Model()
  with open(fname, 'r') as pbtxt:
    text_format.Merge(pbtxt.read(), proto)
  return proto
def WritePbtxt(proto, fname):
  """Write a protobuf message to `fname` in text format."""
  with open(fname, 'w') as out:
    text_format.PrintMessage(proto, out)
def LockGPU(max_retries=10, board=-1):
retry_count = 0
while board == -1 and retry_count < max_retries:
board = gpu_lock.obtain_lock_id()
if board == -1:
sleep(1)
retry_count += 1
if board == -1:
print 'No GPU board available.'
sys.exit(1)
else:
cm.cuda_set_device(board)
cm.cublas_init()
return board
def FreeGPU(board):
  """Shut down CUBLAS to release the GPU.

  NOTE(review): `board` is accepted for symmetry with LockGPU but is unused
  here; the gpu_lock lock itself is presumably released elsewhere — verify.
  """
  cm.cublas_shutdown()
|
from ber_public import cli
def test_cli_template():
    """Smoke test: the bare CLI callback runs and returns None."""
    outcome = cli.cli()
    assert outcome is None
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_hagerstrand.ipynb (unless otherwise specified).
__all__ = ['Diffusion', 'SimpleDiffusion', 'AdvancedDiffusion']
# Cell
import sys
from random import randint
from random import uniform
import numpy as np
from scipy.spatial.distance import cdist
from skimage import data, io, filters
sys.setrecursionlimit(11500)
# Cell
class Diffusion(object):
    """General class for all types of diffusion.

    Holds the state shared by every diffusion model: population per cell,
    the MIF (mean information field) used to sample propagation offsets,
    the list of infected inhabitants and per-iteration bookkeeping.
    Space creation is delegated to the subclasses.

    :param mif_size: int MIF matrix (square) size (must be odd).
    :param pob: int population in each cell.
    :param initial_diff: [(int,int)] coordinate list of initial diffusers.
    :param p0: float probability of self-diffusion (center of the MIF).
    :param max_iter: int maximum number of iterations.
    """

    def __init__(self, mif_size=5, pob=20, initial_diff=[(50,50)],
                 p0=0.3, max_iter=15):
        self._pob = pob
        self._p0 = p0
        self.max_iter = max_iter
        # BUG FIX: mif_size was assigned twice in the original code.
        self.mif_size = mif_size
        self.iteration = 0
        self._infected_pop = []
        self._tmp_adopted = []
        self._clean = False
        self._initial_diff = initial_diff
        self.time_series = []

    def initialize_mif(self, mif_size):
        """Initialize the MIF as a flattened cumulative distribution.

        Cell probabilities are proportional to the distance from the grid
        center (as in the original code); the central cell is then forced to
        the self-diffusion probability p0 and the whole grid renormalized.
        Returns np.cumsum over the flattened grid, suitable for inverse-CDF
        sampling with a single uniform draw.
        """
        x = np.linspace(0.5, mif_size - 0.5, mif_size)
        y = np.linspace(0.5, mif_size - 0.5, mif_size)
        xv, yv = np.meshgrid(x, y)
        points = np.array(list(zip(np.ravel(xv), np.ravel(yv))))
        # BUG FIX: the original used true division (mif_size/2 + 0.5), a
        # Python 2 -> 3 port artifact that pointed one cell past the center
        # (e.g. index 3 instead of 2 for a 5x5 MIF). Floor division restores
        # the intended geometric center.
        half = mif_size // 2
        center = np.array([[half + 0.5, half + 0.5]])
        dist = cdist(center, points)
        dist = dist / np.sum(dist)
        # The central cell has to be exactly p0 to respect the user's choice
        # (everything is renormalized afterwards).
        dist.reshape(mif_size, mif_size)[half, half] = self._p0
        dist = dist / np.sum(dist)
        return np.cumsum(dist)

    def _mif2delta(self, index):
        """Returns a tuple with the increments to get to the propagated frame."""
        return np.unravel_index(index, (self.mif_size, self.mif_size))

    def _select_from_mif(self):
        """Returns an address (pob_adress) from the MIF via inverse-CDF sampling."""
        rnd = uniform(0, 1)
        index = np.nonzero(self._mif > rnd)[0][0]
        return self._mif2delta(index)

    def _clean_adopters(self):
        """Clean and initialize before a new simulation."""
        self._infected_pop = []
        self._tmp_adopted = []
        # BUG FIX: np.bool was removed from modern NumPy; the builtin bool
        # is the same dtype.
        self._pop_array = np.zeros((len(np.ravel(self.space)), self._pob),
                                   dtype=bool)
        self.time_series = []
        for c in self._initial_diff:
            self.space[c[0], c[1]] = 1
            # We also modify the original settlers:
            index = self._space2pop_index(c)
            self._pop_array[index][0] = True
            self._infected_pop.append((index, 0))
        self._clean = False
# Cell
class SimpleDiffusion(Diffusion):
    """Simple model of spatial diffusion based on Hägerstrand.

    1.- Homogeneous and isotropic space
    2.- A single initial diffuser
    3.- ....other assumptions...

    :param N: int Number of rows in simulation space.
    :param M: int Number of columns in simulation space.
    :param mif_size: int MIF matrix (square) size (must be odd).
    :param pob: int population in each cell.
    :param initial_diff: [(int,int)] Coordinate list of start diffusers.
    :param p0: float Probability of self-diffusion.
    :param max_iter: int Maximum number of iterations.

    :attribute space: np.array(M,N,dtype=np.int8) Available space.
    :attribute _pop_array: np.array(M*N,pob,dtype=bool) population per cell.
    :attribute _infected_pop: list (space_idx,int) adopting inhabitants; the
        first entry is the flattened index of the cell in the space array and
        the second is the number of the settler in pop_array.
    :attribute result: np.array((M,N,max_iter)) results of each iteration.
    :attribute time_series: list int Propagations for each iteration.
    :attribute _clean: bool Indicates if we have saved results.
    """

    def __init__(self, N=100, M=100, mif_size=5, pob=20, initial_diff=[(50,50)],
                 p0=0.3, max_iter=15):
        super().__init__(mif_size, pob, initial_diff, p0, max_iter)
        self.M = M
        self.N = N
        self.space = np.zeros((self.N, self.M), dtype=np.int8)
        # BUG FIX: np.bool was removed from modern NumPy; builtin bool is
        # the same dtype.
        self._pop_array = np.zeros((len(np.ravel(self.space)), pob),
                                   dtype=bool)
        self.result = np.zeros((M, N, max_iter), dtype=np.int8)
        for c in initial_diff:
            # NOTE(review): this accepts c[0] == M / c[1] == N, which would
            # overflow the grid — presumably should be >=; kept as-is.
            if c[0] > M or c[1] > N:
                raise ValueError("The coordinates on the starting difusors do not belong to the space")
            # We also modify the original settlers:
            index = self._space2pop_index(c)
            self._pop_array[index][0] = True
            self._infected_pop.append((index, 0))
        if self.mif_size % 2 == 0:
            # BUG FIX: the message used to read "must be non"; "odd" matches
            # the sibling AdvancedDiffusion wording and the actual check.
            raise ValueError("MIF size must be odd")
        else:
            self._mif = self.initialize_mif(self.mif_size)

    def initialize_mif(self, mif_size):
        """Build the cumulative MIF; always uses ``self.mif_size``."""
        return super().initialize_mif(self.mif_size)

    def _propagate(self, pob_adress):
        """It propagates towards the inhabitant in pob_adress if it is non-adopter.

        :param pob_adress: (int,int) the address of the inhabitant to propagate.
            The first entry is the index (flattened) in space and the second
            is the number of the settler in the cell.
        """
        if self._pop_array[pob_adress[0]][pob_adress[1]] == False:
            self._pop_array[pob_adress[0]][pob_adress[1]] = True
            self._tmp_adopted.append(pob_adress)

    def _space2pop_index(self, index):
        """Transform the index of space into the index of the pop_array.

        :param index: (int,int) the index to transform
        """
        return np.ravel_multi_index(index, dims=(self.M, self.N))

    def _pop2space_index(self, index):
        """Return the tuple (i,j) that corresponds to the flattened index."""
        return np.unravel_index(index, (self.M, self.N))

    def _mif2delta(self, index):
        """Returns a tuple with the increments to get to the propagated frame."""
        return super()._mif2delta(index)

    def _random_adress(self):
        """Returns a random address (pob_adress)."""
        return (randint(0, (self.M * self.N) - 1), randint(0, self._pob - 1))

    def _select_from_mif(self):
        """Returns an address (pob_adress) from the MIF."""
        return super()._select_from_mif()

    def _get_propagation_adress(self, adress):
        """Returns a pop_adress address propagated by the MIF."""
        delta = self._select_from_mif()
        # BUG FIX: the center offset is mif_size // 2 (e.g. 2 for a 5x5
        # MIF). The previous int(self.mif_size/2 + 0.5) came from Python 2
        # integer division and was off by one under Python 3.
        half = self.mif_size // 2
        delta = (delta[0] - half, delta[1] - half)
        space_adress = self._pop2space_index(adress[0])
        prop_space_adress = (space_adress[0] + delta[0],
                             space_adress[1] + delta[1])
        try:
            habitant = randint(0, self._pob - 1)
            return (self._space2pop_index(prop_space_adress), habitant)
        except ValueError:
            # Landed off the grid: resample until the propagation fits.
            return self._get_propagation_adress(adress)

    def _clean_adopters(self):
        """Clean and initialize before a new simulation."""
        return super()._clean_adopters()

    def _finished(self):
        """True when max_iter is reached or every inhabitant has adopted.

        BUG FIX: the original tested
        ``iteration == (max_iter or saturated)``; since max_iter is truthy,
        the saturation condition was never evaluated.
        """
        return (self.iteration == self.max_iter or
                np.sum(self._pop_array) >= self.M * self.N * self._pob)

    def _report_and_reset(self):
        """Print the summary of a finished run and reset the counters."""
        print("finished")
        print("There are %i adopters out of a total of %i inhabitants"
              % (np.sum(self._pop_array), self.M * self.N * self._pob))
        print("The total number of iterations performed is: %i" % self.iteration)
        self.iteration = 0
        self._clean = True

    def _record_iteration(self):
        """Store the adopter grid and time series for the current iteration."""
        self.result[:, :, self.iteration] = np.sum(self._pop_array,
                                                   axis=1).reshape(self.M, self.N)
        self.time_series.append(len(self._tmp_adopted))
        self.iteration += 1
        self._tmp_adopted = []

    def spatial_diffusion(self):
        """Propagate the Hagerstrand way."""
        # If we already have results, we must clean and initialize.
        if self._clean:
            self._clean_adopters()
        if self._finished():
            self._report_and_reset()
            return None
        for adress in self._infected_pop:
            self._propagate(self._get_propagation_adress(adress))
        self._infected_pop.extend(self._tmp_adopted)
        self._record_iteration()
        return self.spatial_diffusion()

    def random_diffusion(self):
        """Randomly propagates in space."""
        if self._clean:
            self._clean_adopters()
        if self._finished():
            self._report_and_reset()
            return None
        for adress in self._infected_pop:
            rand_adress = self._random_adress()
            if adress == rand_adress:
                # TODO: a single redraw can still collide with `adress`.
                rand_adress = self._random_adress()
            self._propagate(rand_adress)
        self._infected_pop.extend(self._tmp_adopted)
        self._record_iteration()
        return self.random_diffusion()

    def mixed_diffusion(self, proportion=0.5):
        """Mix the two types of diffusion.

        In each iteration randomly chooses, according to ``proportion``, the
        points that diffuse spatially and those that do so randomly.

        :param proportion: float Proportion of adopters who diffuse spatially.
        """
        if proportion < 0 or proportion > 1:
            raise ValueError("The proportion must be between 0 and 1.")
        if self._clean:
            self._clean_adopters()
        if self._finished():
            self._report_and_reset()
            return None
        for adress in self._infected_pop:
            if uniform(0, 1) <= proportion:
                self._propagate(self._get_propagation_adress(adress))
            else:
                rand_adress = self._random_adress()
                if adress == rand_adress:
                    # TODO: a single redraw can still collide with `adress`.
                    rand_adress = self._random_adress()
                self._propagate(rand_adress)
        self._infected_pop.extend(self._tmp_adopted)
        self._record_iteration()
        return self.mixed_diffusion(proportion)
# Cell
class AdvancedDiffusion(Diffusion):
"""Hägerstrand-based spatial diffusion model, with heterogeneous space.
1.- Isotropic space
2.- A single initial diffuser
3.- .... Other assumptions ...
:param N: int Number of rows and columns in the simulation space.
:param mif_size: int MIF matrix size (square) (must be odd).
:param pob: int maximum population at each cell.
:param density: int Number of Number of initial population nuclei.
:param amplitud: float Gaussian filter width to blur the population.
:param initial_diff: [(int,int)] Coordinate list of start diffusers
:param p0: float Auto-difussion probability
:param max_iter: int Maximum number of iterations
:attribute space: np.array(N,N,dtype=np.int8) Available space
:attribute _pop_array: np.array(N*N,pob,dtype=np.bool) array of inhabitants in each cell
:attribute _infected_pop: list (space_idx,int) List of adoptive cell indices.
The first entry is the flattened index of the cell in the space matrix
and the second is the number of the settler in pop_array. That is,
the list of addresses of each infected resident.
:attribute results: np.array((N,N,max_iter)) Save results of each iteration.
:attribute time_series: list int Propagation of each iteration.
:attribute _clean: bool Indicates if Indicates if there are saved results.
"""
def __init__(self,N=100,mif_size=5,pob=20,initial_diff=[(50,50)],
p0=0.3, max_iter=25,density=20,amplitud=4.0):
super(AdvancedDiffusion,self).__init__(mif_size,pob,initial_diff, p0,
max_iter)
self.N = N
self.density = density
self.amplitud = amplitud
self.space = np.zeros((self.N,self.N),dtype=np.int8)
points = self.N * np.random.random((2, self.density ** 2))
self.space[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
self.space = filters.gaussian(self.space, sigma= self.N / (self.amplitud * self.density))
# We rescale to the value of the maximum pop and convert to integer:
self.space *= self._pob / self.space.max()
self.space = self.space.astype(np.int8)
self._pop_array = np.zeros((len(np.ravel(self.space)),self._pob),
dtype=np.bool)
self.result = np.zeros((self.N,self.N,max_iter),dtype=np.int8)
for c in initial_diff:
if c[0] > self.N or c[1] > self.N:
raise ValueError("Coordinates of initial diffusers do not fall in space")
# We also modify original settlers:
index = self._space2pop_index(c)
self._pop_array[index][0] = True
self._infected_pop.append((index,0))
if self.mif_size%2 == 0:
raise ValueError("MIF size must be odd")
else:
self._mif = self.initialize_mif(self.mif_size)
def _space2pop_index(self,index):
"""Transform the index of space into the index of the pop_array.
:param index (int,int) index to transform
"""
return np.ravel_multi_index(index,dims=(self.N,self.N))
def _pop2space_index(self,index):
"""Returns the tuple (i, j) that corresponds to the flattened index."""
return np.unravel_index(index,(self.N,self.N))
def _mif2delta(self,index):
"""Returns the tuple with the increments to get to the propagated frame."""
return super(AdvancedDiffusion,self)._mif2delta(index)
def _select_from_mif(self):
"""Returns an address (pob_adress) from the MIF."""
return super(AdvancedDiffusion,self)._select_from_mif()
def _random_adress(self):
"""Returns a random address (pob_adress)."""
i = randint(0,self.N - 1)
j = randint(0,self.N - 1)
pop_idx = self._space2pop_index((i,j))
return (pop_idx,randint(0,self.space[i,j] - 1))
def _get_propagation_adress(self,adress):
"""Returns an address propagated from the MIF (pop_adress)."""
#print "Propagates: " + str(adress)
delta = self._select_from_mif()
delta = (delta[0] - int(self.mif_size/2+0.5),delta[1] - int(self.mif_size/2+0.5))
space_adress = self._pop2space_index(adress[0])
prop_space_adress = (space_adress[0] + delta[0],
space_adress[1] + delta[1])
try:
# print(prop_space_adress[0],prop_space_adress[1])
# print(self.space[prop_space_adress[0],prop_space_adress[1]])
habitant = randint(0,self.space[prop_space_adress[0],prop_space_adress[1]])
return (self._space2pop_index(prop_space_adress),habitant)
except ValueError as e:
return self._get_propagation_adress(adress)
def _propagate(self,pob_adress):
"""Propagates through inhabitant in pob_adress if it is not-adoptant.
:param pob_adress: (int,int) The direction of inhabitant to propagate.
The first entry is the index (flattened) in space
and the second is the number of the settler in the cell
"""
# Check if it is not-adoptant
try:
if self._pop_array[pob_adress[0]][pob_adress[1]] == False:
self._pop_array[pob_adress[0]][pob_adress[1]] = True
self._tmp_adopted.append(pob_adress)
else:
pass
except IndexError:
# This means we are infecting someone outside the space
pass
def _clean_adopters(self):
"""Clean and initialize before start a new simulation."""
return super(AdvancedDiffusion,self)._clean_adopters()
def spatial_diffusion(self):
"""Propagates Hagerstrand like."""
# If we have results already, we must to clean and initialize
if self._clean:
self._clean_adopters()
if self.iteration == (self.max_iter or
np.sum(self._pop_array) >= self.M*self.N*self._pob):
print("Done")
print("There are %i adoptants from a total of %i inhabitants" \
% (np.sum(self._pop_array),self.N * self.N * self._pob))
print("The total number of iterations performed is %i" % self.iteration)
self.iteration = 0
self._clean = True
return None
else:
for adress in self._infected_pop:
propagated_adress = self._get_propagation_adress(adress)
self._propagate(propagated_adress)
self._infected_pop.extend(self._tmp_adopted)
#print "Hay %i adoptantes" % len(self._infected_pop)
self.result[:,:,self.iteration] = np.sum(self._pop_array,
axis=1).reshape(self.N,self.N)
self.time_series.append(len(self._tmp_adopted))
self.iteration += 1
self._tmp_adopted = []
return self.spatial_diffusion()
def random_diffusion(self):
"""Propagates randomly in space."""
#Si ya tenemos resultados hay que limpiar e inicializar
if self._clean:
self._clean_adopters()
if self.iteration == (self.max_iter or
np.sum(self._pop_array) >= self.N*self.N*self._pob):
#self.space = np.sum(s._pop_array,axis=1).reshape(s.M,s.N)
print("Done")
print("There are %i adoptants from a total of %i inhabitantes" \
% (np.sum(self._pop_array),self.N*self.N*self._pob))
print("The total number of iterations performed is %i" % self.iteration)
self.iteration = 0
self._clean = True
return None
else:
for adress in self._infected_pop:
rand_adress = self._random_adress()
if adress == rand_adress:
#TODO: must change, it could obtain twice the same
rand_adress = self._random_adress()
self._propagate(rand_adress)
self._infected_pop.extend(self._tmp_adopted)
self.result[:,:,self.iteration] = np.sum(self._pop_array,
axis=1).reshape(self.N,self.N)
self.time_series.append(len(self._tmp_adopted))
self.iteration += 1
self._tmp_adopted = []
return self.random_diffusion()
|
"""Utilities for managing IMAP searches."""
import re
from abc import abstractmethod, ABCMeta
from datetime import datetime
from typing import AnyStr, FrozenSet, Optional, Iterable
from typing_extensions import Final
from .exceptions import SearchNotAllowed
from .interfaces.message import MessageInterface
from .parsing.specials import SearchKey, SequenceSet
from .parsing.specials.flag import Flag, Answered, Deleted, Draft, Flagged, \
Recent, Seen
from .selected import SelectedMailbox
__all__ = ['SearchParams', 'SearchCriteria', 'SearchCriteriaSet']
class SearchParams:
    """Defines certain parameters and routines necessary to process any
    kind of search criteria. If a parameter is not supplied, or a method not
    implemented, any search keys that require it will fail.

    Args:
        selected: The active mailbox session.
        disabled: Search keys that should be disabled.

    Attributes:
        max_seq: The highest message sequence ID in the mailbox.
        max_uid: The highest message UID in the mailbox.
        session_flags: The session flags of the selected mailbox.

    """
    __slots__ = ['selected', 'disabled', 'max_seq', 'max_uid', 'session_flags']
    def __init__(self, selected: SelectedMailbox, *,
                 disabled: Optional[Iterable[bytes]] = None) -> None:
        # PEP 484: the default of None requires an explicit Optional[...]
        # annotation (the original implicit-Optional form is deprecated).
        self.selected: Final = selected
        # Normalized to a frozenset so membership tests are cheap and the
        # value is immutable.
        self.disabled: Final = frozenset(disabled or [])
        self.max_seq: Final = selected.messages.exists
        self.max_uid: Final = selected.messages.max_uid
        self.session_flags: Final = selected.session_flags
class SearchCriteria(metaclass=ABCMeta):
    """Base class for different types of search criteria.

    Args:
        params: The parameters that may be used by some searches.

    """
    def __init__(self, params: SearchParams) -> None:
        self.params = params
    @classmethod
    def _in(cls, substr: AnyStr, data: AnyStr) -> bool:
        # Case-insensitive literal substring test: the needle is escaped so
        # it is never interpreted as a regex pattern.
        # NOTE(review): re.A restricts case-folding to ASCII and is only
        # valid for str patterns — confirm bytes are never passed here.
        re_flags = re.I | re.A
        escaped_substr = re.escape(substr)
        return re.search(escaped_substr, data, re_flags) is not None
    @abstractmethod
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        """Implemented by sub-classes to define the search criteria.

        Args:
            msg_seq: The message sequence ID.
            msg: The message object.

        """
        ...
    @classmethod
    def of(cls, key: SearchKey, params: SearchParams) -> 'SearchCriteria':
        """Factory method for producing a search criteria sub-class from a
        search key.

        Args:
            key: The search key defining the criteria.
            params: The parameters that may be used by some searches.

        Raises:
            SearchNotAllowed: The key is disabled or unrecognized.

        """
        key_name = key.value
        # Disabled keys are rejected first; inversion (NOT <key>) is
        # handled before the name dispatch so any key can be inverted.
        if key_name in params.disabled:
            raise SearchNotAllowed(key_name)
        elif key.inverse:
            return InverseSearchCriteria(key.not_inverse, params)
        # Composite keys
        elif key_name == b'SEQSET':
            return SequenceSetSearchCriteria(key.filter_sequence_set, params)
        elif key_name == b'KEYSET':
            return SearchCriteriaSet(key.filter_key_set, params)
        elif key_name == b'ALL':
            return AllSearchCriteria(params)
        elif key_name == b'OR':
            left_key, right_key = key.filter_key_or
            return OrSearchCriteria(left_key, right_key, params)
        # Flag keys: each system flag has a positive and a negative form
        elif key_name == b'ANSWERED':
            return HasFlagSearchCriteria(Answered, True, params)
        elif key_name == b'UNANSWERED':
            return HasFlagSearchCriteria(Answered, False, params)
        elif key_name == b'DELETED':
            return HasFlagSearchCriteria(Deleted, True, params)
        elif key_name == b'UNDELETED':
            return HasFlagSearchCriteria(Deleted, False, params)
        elif key_name == b'DRAFT':
            return HasFlagSearchCriteria(Draft, True, params)
        elif key_name == b'UNDRAFT':
            return HasFlagSearchCriteria(Draft, False, params)
        elif key_name == b'FLAGGED':
            return HasFlagSearchCriteria(Flagged, True, params)
        elif key_name == b'UNFLAGGED':
            return HasFlagSearchCriteria(Flagged, False, params)
        elif key_name == b'RECENT':
            return HasFlagSearchCriteria(Recent, True, params)
        elif key_name == b'OLD':
            return HasFlagSearchCriteria(Recent, False, params)
        elif key_name == b'SEEN':
            return HasFlagSearchCriteria(Seen, True, params)
        elif key_name == b'UNSEEN':
            return HasFlagSearchCriteria(Seen, False, params)
        elif key_name == b'KEYWORD':
            return HasFlagSearchCriteria(key.filter_flag, True, params)
        elif key_name == b'UNKEYWORD':
            return HasFlagSearchCriteria(key.filter_flag, False, params)
        elif key_name == b'NEW':
            return NewSearchCriteria(params)
        # Date keys: internal date vs. the Date: header
        elif key_name == b'BEFORE':
            return DateSearchCriteria(key.filter_datetime, '<', params)
        elif key_name == b'ON':
            return DateSearchCriteria(key.filter_datetime, '=', params)
        elif key_name == b'SINCE':
            return DateSearchCriteria(key.filter_datetime, '>=', params)
        elif key_name == b'SENTBEFORE':
            return HeaderDateSearchCriteria(key.filter_datetime, '<', params)
        elif key_name == b'SENTON':
            return HeaderDateSearchCriteria(key.filter_datetime, '=', params)
        elif key_name == b'SENTSINCE':
            return HeaderDateSearchCriteria(key.filter_datetime, '>=', params)
        # Size keys
        elif key_name == b'SMALLER':
            return SizeSearchCriteria(key.filter_int, '<', params)
        elif key_name == b'LARGER':
            return SizeSearchCriteria(key.filter_int, '>', params)
        # Text keys: envelope fields, arbitrary headers, and body text
        elif key_name in (b'BCC', b'CC', b'FROM', b'SUBJECT', b'TO'):
            return EnvelopeSearchCriteria(key_name, key.filter_str, params)
        elif key_name == b'HEADER':
            name, value = key.filter_header
            return HeaderSearchCriteria(name, value, params)
        elif key_name in (b'BODY', b'TEXT'):
            return BodySearchCriteria(key.filter_str, params)
        raise SearchNotAllowed(key_name)
class SearchCriteriaSet(SearchCriteria):
    """A conjunction of search criteria: a message matches only when every
    criterion built from *keys* matches it.

    Args:
        keys: The set of search keys that must match.
        params: The parameters that may be used by some searches.

    """
    def __init__(self, keys: FrozenSet[SearchKey],
                 params: SearchParams) -> None:
        super().__init__(params)
        self.all_criteria = [SearchCriteria.of(search_key, params)
                             for search_key in keys]
    @property
    def sequence_set(self) -> SequenceSet:
        """The sequence set to use when finding the messages to match
        against, defaulting to all messages when the criteria set contains
        no sequence-set criterion.
        """
        for criterion in self.all_criteria:
            if isinstance(criterion, SequenceSetSearchCriteria):
                return criterion.seq_set
        return SequenceSet.all()
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        """Return True only if every criterion matches the message.

        Args:
            msg_seq: The message sequence ID.
            msg: The message object.

        """
        for criterion in self.all_criteria:
            if not criterion.matches(msg_seq, msg):
                return False
        return True
class InverseSearchCriteria(SearchCriteria):
    """Negation wrapper: matches exactly when the wrapped search key does
    not match.
    """
    def __init__(self, key: SearchKey, params: SearchParams) -> None:
        super().__init__(params)
        self.key = SearchCriteria.of(key, params)
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        inner_result = self.key.matches(msg_seq, msg)
        return not inner_result
class AllSearchCriteria(SearchCriteria):
    """Criteria for the ``ALL`` search key: matches every message
    unconditionally.
    """
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        # Return annotation added for consistency with every other
        # SearchCriteria sub-class (it was missing here).
        return True
class OrSearchCriteria(SearchCriteria):
    """Disjunction of two search criteria: matches when at least one of
    the two wrapped keys matches.
    """
    def __init__(self, left: SearchKey, right: SearchKey,
                 params: SearchParams) -> None:
        super().__init__(params)
        self.left = SearchCriteria.of(left, self.params)
        self.right = SearchCriteria.of(right, self.params)
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        if self.left.matches(msg_seq, msg):
            return True
        return self.right.matches(msg_seq, msg)
class SequenceSetSearchCriteria(SearchCriteria):
    """Matches when the message falls inside the given sequence set,
    interpreted as UIDs or sequence IDs depending on the set.
    """
    def __init__(self, seq_set: SequenceSet, params: SearchParams) -> None:
        super().__init__(params)
        self.seq_set = seq_set
        # Flatten once against the appropriate maximum for fast lookups.
        max_value = params.max_uid if seq_set.uid else params.max_seq
        self.flat = seq_set.flatten(max_value)
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        candidate = msg.uid if self.seq_set.uid else msg_seq
        return candidate in self.flat
class HasFlagSearchCriteria(SearchCriteria):
    """Matches based on whether the message carries a flag in its
    permanent or session flag sets.

    Args:
        flag: The flag to look for.
        expected: True to match messages that have the flag, False to
            match messages that do not.
        params: The parameters that may be used by some searches.

    """
    def __init__(self, flag: Flag, expected: bool,
                 params: SearchParams) -> None:
        super().__init__(params)
        self.flag = flag
        self.expected = expected
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        """Return True if flag presence equals the expected state."""
        has_flag = self.flag in msg.get_flags(self.params.session_flags)
        # Equivalent to the original
        # `(has_flag and expected) or (not expected and not has_flag)`:
        # both operands are bools, so this is plain equality.
        return has_flag == self.expected
class NewSearchCriteria(SearchCriteria):
    """Matches "new" messages, i.e. those that are recent and not yet
    seen.
    """
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        flags = msg.get_flags(self.params.session_flags)
        if Recent not in flags:
            return False
        return Seen not in flags
class DateSearchCriteria(SearchCriteria):
    """Compares a fixed date against the internal date of the message
    using one of the operators ``<`` (BEFORE), ``=`` (ON) or ``>=``
    (SINCE).
    """
    def __init__(self, when: datetime, op: str, params: SearchParams) -> None:
        super().__init__(params)
        self.when = when.date()
        self.op = op
    @classmethod
    def _get_msg_date(cls, msg: MessageInterface) -> Optional[datetime]:
        # Sub-classes override this to compare against a different date.
        return msg.internal_date
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        msg_datetime = self._get_msg_date(msg)
        if msg_datetime is None:
            return False
        msg_date = msg_datetime.date()
        comparisons = {'<': msg_date < self.when,     # BEFORE
                       '=': msg_date == self.when,    # ON
                       '>=': msg_date >= self.when}   # SINCE
        try:
            return comparisons[self.op]
        except KeyError:
            raise ValueError(self.op)
class HeaderDateSearchCriteria(DateSearchCriteria):
    """Date comparison against the message's ``Date:`` header instead of
    its internal date.
    """
    @classmethod
    def _get_msg_date(cls, msg: MessageInterface) -> Optional[datetime]:
        envelope = msg.get_envelope_structure()
        if not envelope.date:
            return None
        return envelope.date.datetime
class SizeSearchCriteria(SearchCriteria):
    """Compares the message size against a threshold using ``<``
    (SMALLER) or ``>`` (LARGER).
    """
    def __init__(self, size: int, op: str, params: SearchParams) -> None:
        super().__init__(params)
        self.size = size
        self.op = op
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        actual = msg.get_size()
        threshold = self.size
        if self.op == '<':
            return actual < threshold
        if self.op == '>':
            return actual > threshold
        raise ValueError(self.op)
class EnvelopeSearchCriteria(SearchCriteria):
    """Searches for a string inside one field of the message's envelope
    structure (BCC, CC, FROM, SUBJECT or TO).
    """
    def __init__(self, key: bytes, value: str, params: SearchParams) -> None:
        super().__init__(params)
        self.key = key
        self.value = value
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        envelope = msg.get_envelope_structure()
        # SUBJECT is a scalar field; the others are address lists.
        if self.key == b'SUBJECT':
            if not envelope.subject:
                return False
            return self._in(self.value, str(envelope.subject))
        field_attrs = {b'BCC': 'bcc', b'CC': 'cc',
                       b'FROM': 'from_', b'TO': 'to'}
        if self.key not in field_attrs:
            raise ValueError(self.key)
        addresses = getattr(envelope, field_attrs[self.key])
        if not addresses:
            return False
        return any(self._in(self.value, str(address))
                   for address in addresses)
class HeaderSearchCriteria(SearchCriteria):
    """Matches when the named message header exists and contains the
    given value.
    """
    def __init__(self, name: str, value: str, params: SearchParams) -> None:
        super().__init__(params)
        self.name = name.encode('ascii')
        self.value = value
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        for header_value in msg.get_header(self.name):
            if self._in(self.value, header_value):
                return True
        return False
class BodySearchCriteria(SearchCriteria):
    """Matches when the message body contains the given value, compared
    as UTF-8 bytes (unencodable characters are replaced).
    """
    def __init__(self, value: str, params: SearchParams) -> None:
        super().__init__(params)
        self.value = value.encode('utf-8', 'replace')
    def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
        return msg.contains(self.value)
|
#!/usr/bin/env python
#
# Electrum - lightweight Electrum client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import ast
import threading
import json
import copy
import re
import stat
import pbkdf2, hmac, hashlib
import base64
import zlib
from .util import PrintError, profiler, InvalidPassword, WalletFileException
from .plugins import run_hook, plugin_loaders
from .keystore import bip44_derivation
from . import bitcoin
# seed_version is now used for the version of the wallet file
# (bumped on storage-format changes; the convert_version_* methods of
# WalletStorage migrate between these versions)
OLD_SEED_VERSION = 4 # electrum versions < 2.0
NEW_SEED_VERSION = 11 # electrum versions >= 2.0
FINAL_SEED_VERSION = 16 # electrum >= 2.7 will set this to prevent
# old versions from overwriting new format
def multisig_type(wallet_type):
    '''If wallet_type is mofn multi-sig, return [m, n],
    otherwise return None.'''
    if not wallet_type:
        return None
    # Raw string: the original '(\d+)of(\d+)' relied on an invalid escape
    # sequence, which is a DeprecationWarning (and a future SyntaxError).
    match = re.match(r'(\d+)of(\d+)', wallet_type)
    if not match:
        return None
    return [int(x) for x in match.group(1, 2)]
def get_derivation_used_for_hw_device_encryption():
    """Return the fixed derivation path whose xpub encrypts hw-device
    wallet files.

    The two hardened indices are ascii 'ELE' (BIP43 purpose) and 'BIE2'
    expressed as decimal integers.
    """
    return "m/4541509'/1112098098'"
# storage encryption version
# 0 = plaintext, 1 = user-password ECIES (BIE1), 2 = xpub-derived ECIES (BIE2)
STO_EV_PLAINTEXT, STO_EV_USER_PW, STO_EV_XPUB_PW = range(0, 3)
class WalletStorage(PrintError):
    """JSON key/value store backing an Electrum wallet file.

    Responsibilities visible here:
      * thread-safe get/put access to the in-memory dict ``self.data``
      * optional whole-file ECIES encryption (magics ``BIE1``/``BIE2``)
      * atomic, permission-preserving persistence to disk
      * in-place upgrades between historical storage format versions
    """
    def __init__(self, path, manual_upgrades=False):
        # manual_upgrades=True defers requires_split()/upgrade() to the
        # caller instead of running them inside load_data().
        self.print_error("wallet path", path)
        self.manual_upgrades = manual_upgrades
        self.lock = threading.RLock()
        self.data = {}
        self.path = path
        self.modified = False
        self.pubkey = None
        if self.file_exists():
            with open(self.path, "r", encoding='utf-8') as f:
                self.raw = f.read()
            self._encryption_version = self._init_encryption_version()
            if not self.is_encrypted():
                self.load_data(self.raw)
        else:
            self._encryption_version = STO_EV_PLAINTEXT
            # avoid new wallets getting 'upgraded'
            self.put('seed_version', FINAL_SEED_VERSION)
    def load_data(self, s):
        """Parse the wallet contents in *s* into ``self.data``.

        Tries JSON first, then falls back to the historical Python-literal
        format, keeping only JSON-serializable keys/values. May trigger a
        plugin load, a split check and a format upgrade.
        """
        try:
            self.data = json.loads(s)
        except:
            try:
                d = ast.literal_eval(s)
                # NOTE(review): `labels` is unused; presumably a leftover
                # sanity probe that the literal is a dict with labels.
                labels = d.get('labels', {})
            except Exception as e:
                raise IOError("Cannot read wallet file '%s'" % self.path)
            self.data = {}
            for key, value in d.items():
                try:
                    json.dumps(key)
                    json.dumps(value)
                except:
                    self.print_error('Failed to convert label to json format', key)
                    continue
                self.data[key] = value
        # check here if I need to load a plugin
        t = self.get('wallet_type')
        l = plugin_loaders.get(t)
        if l: l()
        if not self.manual_upgrades:
            if self.requires_split():
                raise WalletFileException("This wallet has multiple accounts and must be split")
            if self.requires_upgrade():
                self.upgrade()
    def is_past_initial_decryption(self):
        """Return if storage is in a usable state for normal operations.
        The value is True exactly
        if encryption is disabled completely (self.is_encrypted() == False),
        or if encryption is enabled but the contents have already been decrypted.
        """
        return bool(self.data)
    def is_encrypted(self):
        """Return if storage encryption is currently enabled."""
        return self.get_encryption_version() != STO_EV_PLAINTEXT
    def is_encrypted_with_user_pw(self):
        # File-level encryption with a user-chosen password (BIE1 magic).
        return self.get_encryption_version() == STO_EV_USER_PW
    def is_encrypted_with_hw_device(self):
        # File-level encryption with an xpub-derived password (BIE2 magic).
        return self.get_encryption_version() == STO_EV_XPUB_PW
    def get_encryption_version(self):
        """Return the version of encryption used for this storage.
        0: plaintext / no encryption
        ECIES, private key derived from a password,
        1: password is provided by user
        2: password is derived from an xpub; used with hw wallets
        """
        return self._encryption_version
    def _init_encryption_version(self):
        # Sniff the first four base64-decoded bytes for the ECIES magic;
        # anything undecodable is treated as plaintext.
        try:
            magic = base64.b64decode(self.raw)[0:4]
            if magic == b'BIE1':
                return STO_EV_USER_PW
            elif magic == b'BIE2':
                return STO_EV_XPUB_PW
            else:
                return STO_EV_PLAINTEXT
        except:
            return STO_EV_PLAINTEXT
    def file_exists(self):
        # Also False for an empty/None path, not only for a missing file.
        return self.path and os.path.exists(self.path)
    def get_key(self, password):
        """Derive the ECIES key pair from *password* (PBKDF2-HMAC-SHA512,
        1024 iterations, empty salt)."""
        secret = pbkdf2.PBKDF2(password, '', iterations = 1024, macmodule = hmac, digestmodule = hashlib.sha512).read(64)
        ec_key = bitcoin.EC_KEY(secret)
        return ec_key
    def _get_encryption_magic(self):
        # Map the current encryption version to its on-disk magic bytes.
        v = self._encryption_version
        if v == STO_EV_USER_PW:
            return b'BIE1'
        elif v == STO_EV_XPUB_PW:
            return b'BIE2'
        else:
            raise WalletFileException('no encryption magic for version: %s' % v)
    def decrypt(self, password):
        """Decrypt the raw file contents with *password* and load them."""
        ec_key = self.get_key(password)
        if self.raw:
            enc_magic = self._get_encryption_magic()
            s = zlib.decompress(ec_key.decrypt_message(self.raw, enc_magic))
        else:
            s = None
        self.pubkey = ec_key.get_public_key()
        # NOTE(review): if self.raw were empty, s is None here and
        # decode() would raise AttributeError — presumably encrypted
        # wallets always have content; confirm with callers.
        s = s.decode('utf8')
        self.load_data(s)
    def check_password(self, password):
        """Raises an InvalidPassword exception on invalid password"""
        if not self.is_encrypted():
            return
        # Compare the pubkey derived from the candidate password with the
        # one the file was encrypted to.
        if self.pubkey and self.pubkey != self.get_key(password).get_public_key():
            raise InvalidPassword()
    def set_keystore_encryption(self, enable):
        # Toggles keystore-level (not file-level) encryption.
        self.put('use_encryption', enable)
    def set_password(self, password, enc_version=None):
        """Set a password to be used for encrypting this storage."""
        if enc_version is None:
            enc_version = self._encryption_version
        if password and enc_version != STO_EV_PLAINTEXT:
            ec_key = self.get_key(password)
            self.pubkey = ec_key.get_public_key()
            self._encryption_version = enc_version
        else:
            # No password (or plaintext requested): disable encryption.
            self.pubkey = None
            self._encryption_version = STO_EV_PLAINTEXT
        # make sure next storage.write() saves changes
        with self.lock:
            self.modified = True
    def get(self, key, default=None):
        """Thread-safe read; returns a deep copy so callers cannot mutate
        stored state in place."""
        with self.lock:
            v = self.data.get(key)
            if v is None:
                v = default
            else:
                v = copy.deepcopy(v)
        return v
    def put(self, key, value):
        """Thread-safe write; storing None deletes the key. Keys/values
        must be JSON-serializable or the write is dropped (and logged)."""
        try:
            json.dumps(key)
            json.dumps(value)
        except:
            self.print_error("json error: cannot save", key)
            return
        with self.lock:
            if value is not None:
                if self.data.get(key) != value:
                    self.modified = True
                    self.data[key] = copy.deepcopy(value)
            elif key in self.data:
                self.modified = True
                self.data.pop(key)
    @profiler
    def write(self):
        # Public entry point; serializes under the lock.
        with self.lock:
            self._write()
    def _write(self):
        # Serialize, optionally encrypt, then atomically replace the file.
        if threading.currentThread().isDaemon():
            self.print_error('warning: daemon thread cannot write wallet')
            return
        if not self.modified:
            return
        s = json.dumps(self.data, indent=4, sort_keys=True)
        if self.pubkey:
            s = bytes(s, 'utf8')
            c = zlib.compress(s)
            enc_magic = self._get_encryption_magic()
            s = bitcoin.encrypt_message(c, self.pubkey, enc_magic)
            s = s.decode('utf8')
        temp_path = "%s.tmp.%s" % (self.path, os.getpid())
        with open(temp_path, "w", encoding='utf-8') as f:
            f.write(s)
            f.flush()
            os.fsync(f.fileno())
        # Preserve existing file permissions; default to owner read/write.
        mode = os.stat(self.path).st_mode if os.path.exists(self.path) else stat.S_IREAD | stat.S_IWRITE
        # perform atomic write on POSIX systems
        try:
            os.rename(temp_path, self.path)
        except:
            # e.g. Windows, where rename over an existing file fails
            os.remove(self.path)
            os.rename(temp_path, self.path)
        os.chmod(self.path, mode)
        self.print_error("saved", self.path)
        self.modified = False
    def requires_split(self):
        # Multi-account wallets must be split into one file per account.
        d = self.get('accounts', {})
        return len(d) > 1
    def split_accounts(storage):
        # NOTE(review): first parameter is named `storage` instead of
        # `self`; it still behaves as an ordinary instance method.
        # Returns the list of new wallet file paths, or None when the
        # wallet has fewer than two accounts.
        result = []
        # backward compatibility with old wallets
        d = storage.get('accounts', {})
        if len(d) < 2:
            return
        wallet_type = storage.get('wallet_type')
        if wallet_type == 'old':
            assert len(d) == 2
            storage1 = WalletStorage(storage.path + '.deterministic')
            storage1.data = copy.deepcopy(storage.data)
            storage1.put('accounts', {'0': d['0']})
            storage1.upgrade()
            storage1.write()
            storage2 = WalletStorage(storage.path + '.imported')
            storage2.data = copy.deepcopy(storage.data)
            storage2.put('accounts', {'/x': d['/x']})
            storage2.put('seed', None)
            storage2.put('seed_version', None)
            storage2.put('master_public_key', None)
            storage2.put('wallet_type', 'imported')
            storage2.upgrade()
            storage2.write()
            result = [storage1.path, storage2.path]
        elif wallet_type in ['bip44', 'trezor', 'keepkey', 'ledger', 'btchip', 'digitalbitbox']:
            mpk = storage.get('master_public_keys')
            for k in d.keys():
                i = int(k)
                x = d[k]
                if x.get("pending"):
                    continue
                xpub = mpk["x/%d'"%i]
                new_path = storage.path + '.' + k
                storage2 = WalletStorage(new_path)
                storage2.data = copy.deepcopy(storage.data)
                # save account, derivation and xpub at index 0
                storage2.put('accounts', {'0': x})
                storage2.put('master_public_keys', {"x/0'": xpub})
                storage2.put('derivation', bip44_derivation(k))
                storage2.upgrade()
                storage2.write()
                result.append(new_path)
        else:
            raise WalletFileException("This wallet has multiple accounts and must be split")
        return result
    def requires_upgrade(self):
        # Only existing files with an older format version need upgrading.
        return self.file_exists() and self.get_seed_version() < FINAL_SEED_VERSION
    def upgrade(self):
        """Run all storage-format conversions in order and persist."""
        self.print_error('upgrading wallet format')
        self.convert_imported()
        self.convert_wallet_type()
        self.convert_account()
        self.convert_version_13_b()
        self.convert_version_14()
        self.convert_version_15()
        self.convert_version_16()
        self.put('seed_version', FINAL_SEED_VERSION)  # just to be sure
        self.write()
    def convert_wallet_type(self):
        """Convert pre-keystore wallet layouts to 'standard' + keystore."""
        if not self._is_upgrade_method_needed(0, 13):
            return
        wallet_type = self.get('wallet_type')
        if wallet_type == 'btchip': wallet_type = 'ledger'
        if self.get('keystore') or self.get('x1/') or wallet_type=='imported':
            return False
        assert not self.requires_split()
        seed_version = self.get_seed_version()
        seed = self.get('seed')
        xpubs = self.get('master_public_keys')
        xprvs = self.get('master_private_keys', {})
        mpk = self.get('master_public_key')
        keypairs = self.get('keypairs')
        key_type = self.get('key_type')
        if seed_version == OLD_SEED_VERSION or wallet_type == 'old':
            d = {
                'type': 'old',
                'seed': seed,
                'mpk': mpk,
            }
            self.put('wallet_type', 'standard')
            self.put('keystore', d)
        elif key_type == 'imported':
            d = {
                'type': 'imported',
                'keypairs': keypairs,
            }
            self.put('wallet_type', 'standard')
            self.put('keystore', d)
        elif wallet_type in ['xpub', 'standard']:
            xpub = xpubs["x/"]
            xprv = xprvs.get("x/")
            d = {
                'type': 'bip32',
                'xpub': xpub,
                'xprv': xprv,
                'seed': seed,
            }
            self.put('wallet_type', 'standard')
            self.put('keystore', d)
        elif wallet_type in ['bip44']:
            xpub = xpubs["x/0'"]
            xprv = xprvs.get("x/0'")
            d = {
                'type': 'bip32',
                'xpub': xpub,
                'xprv': xprv,
            }
            self.put('wallet_type', 'standard')
            self.put('keystore', d)
        elif wallet_type in ['trezor', 'keepkey', 'ledger', 'digitalbitbox']:
            xpub = xpubs["x/0'"]
            derivation = self.get('derivation', bip44_derivation(0))
            d = {
                'type': 'hardware',
                'hw_type': wallet_type,
                'xpub': xpub,
                'derivation': derivation,
            }
            self.put('wallet_type', 'standard')
            self.put('keystore', d)
        elif (wallet_type == '2fa') or multisig_type(wallet_type):
            # Multi-signature (and 2fa) wallets keep one keystore per
            # cosigner under its original 'x<i>/' key.
            for key in xpubs.keys():
                d = {
                    'type': 'bip32',
                    'xpub': xpubs[key],
                    'xprv': xprvs.get(key),
                }
                if key == 'x1/' and seed:
                    d['seed'] = seed
                self.put(key, d)
        else:
            raise WalletFileException('Unable to tell wallet type. Is this even a wallet file?')
        # remove junk
        self.put('master_public_key', None)
        self.put('master_public_keys', None)
        self.put('master_private_keys', None)
        self.put('derivation', None)
        self.put('seed', None)
        self.put('keypairs', None)
        self.put('key_type', None)
    def convert_version_13_b(self):
        # version 13 is ambiguous, and has an earlier and a later structure
        if not self._is_upgrade_method_needed(0, 13):
            return
        if self.get('wallet_type') == 'standard':
            if self.get('keystore').get('type') == 'imported':
                pubkeys = self.get('keystore').get('keypairs').keys()
                d = {'change': []}
                receiving_addresses = []
                for pubkey in pubkeys:
                    addr = bitcoin.pubkey_to_address('p2pkh', pubkey)
                    receiving_addresses.append(addr)
                d['receiving'] = receiving_addresses
                self.put('addresses', d)
                self.put('pubkeys', None)
        self.put('seed_version', 13)
    def convert_version_14(self):
        # convert imported wallets for 3.0
        if not self._is_upgrade_method_needed(13, 13):
            return
        if self.get('wallet_type') =='imported':
            addresses = self.get('addresses')
            if type(addresses) is list:
                addresses = dict([(x, None) for x in addresses])
                self.put('addresses', addresses)
        elif self.get('wallet_type') == 'standard':
            if self.get('keystore').get('type')=='imported':
                addresses = set(self.get('addresses').get('receiving'))
                pubkeys = self.get('keystore').get('keypairs').keys()
                assert len(addresses) == len(pubkeys)
                d = {}
                for pubkey in pubkeys:
                    addr = bitcoin.pubkey_to_address('p2pkh', pubkey)
                    assert addr in addresses
                    d[addr] = {
                        'pubkey': pubkey,
                        'redeem_script': None,
                        'type': 'p2pkh'
                    }
                self.put('addresses', d)
                self.put('pubkeys', None)
                self.put('wallet_type', 'imported')
        self.put('seed_version', 14)
    def convert_version_15(self):
        if not self._is_upgrade_method_needed(14, 14):
            return
        assert self.get('seed_type') != 'segwit' # unsupported derivation
        self.put('seed_version', 15)
    def convert_version_16(self):
        # fixes issue #3193 for Imported_Wallets with addresses
        # also, previous versions allowed importing any garbage as an address
        # which we now try to remove, see pr #3191
        if not self._is_upgrade_method_needed(15, 15):
            return
        def remove_address(addr):
            # Purge every reference to an invalid address (except the
            # 'addresses' dict itself, rebuilt by the caller below).
            def remove_from_dict(dict_name):
                d = self.get(dict_name, None)
                if d is not None:
                    d.pop(addr, None)
                    self.put(dict_name, d)
            def remove_from_list(list_name):
                lst = self.get(list_name, None)
                if lst is not None:
                    s = set(lst)
                    s -= {addr}
                    self.put(list_name, list(s))
            # note: we don't remove 'addr' from self.get('addresses')
            remove_from_dict('addr_history')
            remove_from_dict('labels')
            remove_from_dict('payment_requests')
            remove_from_list('frozen_addresses')
        if self.get('wallet_type') == 'imported':
            addresses = self.get('addresses')
            assert isinstance(addresses, dict)
            addresses_new = dict()
            for address, details in addresses.items():
                if not bitcoin.is_address(address):
                    remove_address(address)
                    continue
                if details is None:
                    addresses_new[address] = {}
                else:
                    addresses_new[address] = details
            self.put('addresses', addresses_new)
        self.put('seed_version', 16)
    def convert_imported(self):
        # Migrate the legacy imported-account layout out of 'accounts'.
        if not self._is_upgrade_method_needed(0, 13):
            return
        # '/x' is the internal ID for imported accounts
        d = self.get('accounts', {}).get('/x', {}).get('imported',{})
        if not d:
            return False
        addresses = []
        keypairs = {}
        for addr, v in d.items():
            pubkey, privkey = v
            if privkey:
                keypairs[pubkey] = privkey
            else:
                addresses.append(addr)
        if addresses and keypairs:
            raise WalletFileException('mixed addresses and privkeys')
        elif addresses:
            self.put('addresses', addresses)
            self.put('accounts', None)
        elif keypairs:
            self.put('wallet_type', 'standard')
            self.put('key_type', 'imported')
            self.put('keypairs', keypairs)
            self.put('accounts', None)
        else:
            raise WalletFileException('no addresses or privkeys')
    def convert_account(self):
        # Drop the obsolete 'accounts' key entirely.
        if not self._is_upgrade_method_needed(0, 13):
            return
        self.put('accounts', None)
    def _is_upgrade_method_needed(self, min_version, max_version):
        """Return True when the current seed_version falls within
        [min_version, max_version]; raise if it is already below
        min_version (conversions must run in order)."""
        cur_version = self.get_seed_version()
        if cur_version > max_version:
            return False
        elif cur_version < min_version:
            raise WalletFileException(
                'storage upgrade: unexpected version {} (should be {}-{})'
                .format(cur_version, min_version, max_version))
        else:
            return True
    def get_action(self):
        # Plugins may dictate the next wizard action; otherwise a missing
        # file means a new wallet must be created (returns None if neither).
        action = run_hook('get_action', self)
        if action:
            return action
        if not self.file_exists():
            return 'new'
    def get_seed_version(self):
        """Return the storage format version, inferring it for very old
        files and rejecting versions this code cannot handle."""
        seed_version = self.get('seed_version')
        if not seed_version:
            # Pre-2.0 wallets had no explicit version; a 128-char master
            # public key identifies the old format.
            seed_version = OLD_SEED_VERSION if len(self.get('master_public_key','')) == 128 else NEW_SEED_VERSION
        if seed_version > FINAL_SEED_VERSION:
            raise WalletFileException('This version of Electrum is too old to open this wallet.\n'
                                      '(highest supported storage version: {}, version of this file: {})'
                                      .format(FINAL_SEED_VERSION, seed_version))
        if seed_version==14 and self.get('seed_type') == 'segwit':
            self.raise_unsupported_version(seed_version)
        if seed_version >=12:
            return seed_version
        if seed_version not in [OLD_SEED_VERSION, NEW_SEED_VERSION]:
            self.raise_unsupported_version(seed_version)
        return seed_version
    def raise_unsupported_version(self, seed_version):
        """Raise WalletFileException with recovery guidance for known
        unsupported seed versions."""
        msg = "Your wallet has an unsupported seed version."
        msg += '\n\nWallet file: %s' % os.path.abspath(self.path)
        if seed_version in [5, 7, 8, 9, 10, 14]:
            msg += "\n\nTo open this wallet, try 'git checkout seed_v%d'"%seed_version
        if seed_version == 6:
            # version 1.9.8 created v6 wallets when an incorrect seed was entered in the restore dialog
            msg += '\n\nThis file was created because of a bug in version 1.9.8.'
            if self.get('master_public_keys') is None and self.get('master_private_keys') is None and self.get('imported_keys') is None:
                # pbkdf2 was not included with the binaries, and wallet creation aborted.
                msg += "\nIt does not contain any keys, and can safely be removed."
            else:
                # creation was complete if electrum was run from source
                msg += "\nPlease open this file with Electrum 1.9.8, and move your coins to a new wallet."
        raise WalletFileException(msg)
|
import logging
import os
import shutil
import regex
from curation_utils.file_helper import get_storage_name
from doc_curation.md import content_processor
from doc_curation.md.file import MdFile
from indic_transliteration import sanscript
def ensure_ordinal_in_title(dir_path, transliteration_target=sanscript.DEVANAGARI, dry_run=False):
  """Prefix every md file's title in dir_path with its zero-padded ordinal.

  Files are processed in sorted filename order; any existing leading ordinal
  (ASCII / Devanagari / Kannada digits) is stripped first. The ordinal is
  optionally transliterated from OPTITRANS into transliteration_target.

  :param dir_path: directory whose .md files (except _index.md) are retitled
  :param transliteration_target: target script for the ordinal, or falsy to keep digits
  :param dry_run: when True, MdFile.set_title only reports the change
  """
  # os.listdir entries are joined once here; they are already full paths below.
  files = [os.path.join(dir_path, x) for x in os.listdir(dir_path) if x != "_index.md" and x.endswith(".md")]
  files.sort()
  # Hoisted out of the loop (loop-invariant): pad width covers the file count.
  fmt = "%%0%dd" % (len(str(len(files))))
  for position, file_path in enumerate(files):
    # Fix: file_path is already absolute; re-joining with dir_path was redundant.
    md_file = MdFile(file_path=file_path)
    title = md_file.get_title(omit_chapter_id=False)
    # Drop any existing leading ordinal so the prefix is not duplicated.
    title = regex.sub("(^[\d०-९೦-೯ ]+ )", "", title)
    ordinal = fmt % (position + 1)
    if transliteration_target:
      ordinal = sanscript.transliterate(ordinal, sanscript.OPTITRANS, transliteration_target)
    md_file.set_title(title="%s %s" % (ordinal, title), dry_run=dry_run)
def fix_title_numbering(dir_path, dry_run):
  """Zero-pad titles that start with a single Devanagari digit (e.g. '१ x' -> '०१ x').

  :param dir_path: directory whose .md files (except _index.md) are scanned
  :param dry_run: when True, MdFile.set_title only reports the change
  """
  files = [x for x in os.listdir(dir_path) if x != "_index.md" and x.endswith(".md")]
  files.sort()
  for file in files:
    # Fix: the original assigned the joined *path string* to md_file and then
    # called .get_title() on it, which raised AttributeError on the first file.
    md_file = MdFile(file_path=os.path.join(dir_path, file))
    title = md_file.get_title()
    if title is None:
      # Fix: skip files without a title instead of aborting the whole loop.
      continue
    new_title = regex.sub("(^[०-९][^०-९])", "०\\1", title)
    if title != new_title:
      logging.info("Changing '%s' to '%s'", title, new_title)
      md_file.set_title(title=new_title, dry_run=dry_run)
def set_filename_from_title(md_file, transliteration_source=sanscript.DEVANAGARI, dry_run=False, skip_dirs=True):
  """Rename md_file (or its parent directory, for _index.md) from its title.

  The title is optionally transliterated into OPTITRANS before being turned
  into a storage-safe name. With skip_dirs=True, _index.md files are ignored.
  """
  if skip_dirs and str(md_file.file_path).endswith("_index.md"):
    logging.info("Special file %s. Skipping." % md_file.file_path)
    return
  title = md_file.get_title(omit_chapter_id=False)
  if transliteration_source is not None:
    title = sanscript.transliterate(data=title, _from=transliteration_source, _to=sanscript.OPTITRANS)
  # For an _index.md we rename the containing directory (no extension);
  # otherwise we rename the file itself.
  is_index_file = os.path.basename(md_file.file_path) == "_index.md"
  current_path = os.path.dirname(md_file.file_path) if is_index_file else md_file.file_path
  extension = "" if is_index_file else ".md"
  target_path = os.path.join(os.path.dirname(current_path), get_storage_name(text=title) + extension)
  if str(current_path) != target_path:
    logging.info("Renaming %s to %s", current_path, target_path)
    if not dry_run:
      os.rename(src=current_path, dst=target_path)
def get_title_from_filename(file_path, transliteration_target):
  """Derive a display title from an md file's name.

  For _index.md the parent directory's name is used, prefixed with '+'.
  Underscores become spaces; the result is optionally transliterated from
  OPTITRANS into transliteration_target.
  """
  base_name = os.path.basename(file_path)
  if base_name == "_index.md":
    # Directory files take the parent directory's name, marked with '+'.
    stem = "+" + os.path.basename(os.path.dirname(file_path)).replace(".md", "")
  else:
    stem = base_name.replace(".md", "")
  title = stem.replace("_", " ")
  if transliteration_target is not None:
    title = sanscript.transliterate(data=title, _from=sanscript.OPTITRANS, _to=transliteration_target, maybe_use_dravidian_variant=True)
  return title
def set_title_from_filename(md_file, transliteration_target=sanscript.DEVANAGARI, dry_run=False):
  """Set the file's frontmatter title derived from its (OPTITRANS) filename."""
  derived_title = get_title_from_filename(
    file_path=md_file.file_path, transliteration_target=transliteration_target)
  md_file.set_title(dry_run=dry_run, title=derived_title)
def prepend_file_indexes_to_title(md_file, dry_run):
  """Prefix the title with the numeric index encoded in the filename (text before '_')."""
  base_name = os.path.basename(md_file.file_path)
  if base_name == "_index.md":
    return
  file_index = regex.sub("_.+", "", base_name)
  new_title = file_index + " " + md_file.get_title(omit_chapter_id=False)
  md_file.set_title(dry_run=dry_run, title=new_title)
def add_init_words_to_title(md_file, num_words=2, target_title_length=None,script=sanscript.DEVANAGARI, dry_run=False):
  """Append the content's first words to the existing title for identification."""
  (metadata, content) = md_file.read()
  title = metadata["title"]
  extra_words = content_processor.title_from_text(
    text=content, num_words=num_words, target_title_length=target_title_length, script=script)
  if extra_words is not None:
    md_file.set_title(title="%s %s" % (title.strip(), extra_words), dry_run=dry_run)
def transliterate_title(md_file, transliteration_target=sanscript.DEVANAGARI, dry_run=False):
  """Transliterate the title from OPTITRANS into the target script."""
  logging.debug(md_file.file_path)
  new_title = sanscript.transliterate(
    data=md_file.get_title(), _from=sanscript.OPTITRANS, _to=transliteration_target)
  md_file.set_title(title=new_title, dry_run=dry_run)
def remove_post_numeric_title_text(md_file, dry_run=False):
  """Trim the title down to its leading numeric id (digits + one trailing space)."""
  logging.debug(md_file.file_path)
  trimmed_title = regex.sub("([\d०-९೦-೯-]+ ).+", "\\1", md_file.get_title())
  md_file.set_title(title=trimmed_title, dry_run=dry_run)
def fix_field_values(md_files,
                     spreadhsheet_id, worksheet_name, id_column, value_column,
                     md_file_to_id, md_frontmatter_field_name="title", google_key='/home/vvasuki/sysconf/kunchikA/google/sanskritnlp/service_account_key.json', post_process_fn=None,
                     dry_run=False):
  """Set a frontmatter field on each md file from a Google-sheet lookup.

  For every file, md_file_to_id derives a row id; the value_column cell of the
  matching sheet row (optionally transformed by post_process_fn) is written to
  md_frontmatter_field_name. Files whose id or looked-up value is None are skipped.

  NOTE(review): 'spreadhsheet_id' is a typo, but it is a keyword parameter name
  visible to callers (and matches sheets.IndexSheet), so it is kept as-is.
  """
  # logging.debug(adhyaaya_to_mp3_map)
  logging.info("Fixing titles of %d files", len(md_files))
  # Imported lazily so the google-sheets dependency is only needed when used.
  from curation_utils.google import sheets
  doc_data = sheets.IndexSheet(spreadhsheet_id=spreadhsheet_id, worksheet_name=worksheet_name, google_key=google_key,
                               id_column=id_column)
  for md_file in md_files:
    # md_file.replace_in_content("<div class=\"audioEmbed\".+?></div>\n", "")
    logging.debug(md_file.file_path)
    adhyaaya_id = md_file_to_id(md_file)
    if adhyaaya_id != None:
      logging.debug(adhyaaya_id)
      value = doc_data.get_value(adhyaaya_id, column_name=value_column)
      if post_process_fn is not None:
        value = post_process_fn(value)
      if value != None:
        md_file.set_frontmatter_field_value(field_name=md_frontmatter_field_name, value=value, dry_run=dry_run)
def get_metadata_field_values(md_files, field_name):
  """Yield the value of the given frontmatter field for each md file.

  Raises KeyError (from the metadata dict) if a file lacks the field.
  """
  logging.info("Getting metadata from %s field of %d files", field_name, len(md_files))
  for md_file in md_files:
    logging.debug(md_file.file_path)
    metadata = md_file.read()[0]
    yield metadata[field_name]
def shloka_title_maker(text):
  """Build a title like '007 <first words>' from a shloka's trailing ॥id॥ marker."""
  # Pull the id between the ॥ ... ॥ markers, then normalize it to ASCII digits.
  marker_id = regex.search("॥\s*([०-९\d\.]+)\s*॥", text).group(1)
  id_in_text = sanscript.transliterate(marker_id, sanscript.DEVANAGARI, sanscript.OPTITRANS)
  # Keep only the final number (drops any leading chapter part like "3.").
  id_in_text = regex.search("\.?\s*(\d+)\s*$", id_in_text).group(1)
  title_id = "%03d" % int(id_in_text)
  return content_processor.title_from_text(text=text, num_words=2, target_title_length=None, depunctuate=True, title_id=title_id)
def copy_metadata_and_filename(dest_dir, ref_dir, sub_path_id_maker=None, dry_run=False):
  """Copy frontmatter metadata and relative file paths from ref_dir onto dest_dir.

  Each destination file is matched to a reference file via a sub-path id; it
  then receives the reference file's metadata and is moved so its path under
  dest_dir mirrors the reference file's path under ref_dir.

  NOTE(review): when sub_path_id_maker is None it is passed as None to
  get_sub_path_to_reference_map and only defaulted below for the destination
  files -- presumably the library applies the same default internally; confirm.
  """
  from doc_curation.md import library
  sub_path_to_reference = library.get_sub_path_to_reference_map(ref_dir=ref_dir, sub_path_id_maker=sub_path_id_maker)
  dest_md_files = library.get_md_files_from_path(dir_path=dest_dir)
  if sub_path_id_maker is None:
    sub_path_id_maker = lambda x: library.get_sub_path_id(sub_path=str(x).replace(dest_dir, ""))
  for md_file in dest_md_files:
    sub_path_id = sub_path_id_maker(md_file.file_path)
    if sub_path_id is None:
      # File does not participate in the id scheme; leave it untouched.
      continue
    ref_md = sub_path_to_reference[sub_path_id]
    sub_file_path_ref = str(ref_md.file_path).replace(ref_dir, "")
    (ref_metadata, _) = ref_md.read()
    # Metadata is replaced even in dry_run mode (replace_content_metadata
    # receives dry_run and handles it); only the move below is gated here.
    md_file.replace_content_metadata(new_metadata=ref_metadata, dry_run=dry_run)
    target_path = os.path.abspath("%s/%s" % (dest_dir, sub_file_path_ref))
    if dry_run:
      # NOTE(review): the move is only logged in dry_run mode; the real move
      # below is silent -- confirm whether that is intentional.
      logging.info("Moving %s to %s", md_file.file_path, target_path)
    else:
      os.makedirs(os.path.dirname(target_path), exist_ok=True)
      shutil.move(md_file.file_path, target_path)
|
from .particlefilter import ParticleFilter
|
from collections import defaultdict
from typing import Any, Dict, List, Sequence
from multiaddr import Multiaddr
from libp2p.crypto.keys import KeyPair, PrivateKey, PublicKey
from .id import ID
from .peerdata import PeerData, PeerDataError
from .peerinfo import PeerInfo
from .peerstore_interface import IPeerStore
class PeerStore(IPeerStore):
    """In-memory implementation of the peer-store interface.

    Stores addresses, protocols, keys and arbitrary metadata per peer.
    Backed by a defaultdict, so writer methods implicitly create an entry
    for an unknown peer, while reader methods check membership explicitly
    and raise PeerStoreError for unknown peers.
    """
    # peer ID -> everything known about that peer.
    peer_data_map: Dict[ID, PeerData]
    def __init__(self) -> None:
        # defaultdict: indexing a missing peer ID creates a fresh PeerData.
        self.peer_data_map = defaultdict(PeerData)
    def peer_info(self, peer_id: ID) -> PeerInfo:
        """
        :param peer_id: peer ID to get info for
        :return: peer info object
        :raise PeerStoreError: if peer ID not found
        """
        if peer_id in self.peer_data_map:
            peer_data = self.peer_data_map[peer_id]
            return PeerInfo(peer_id, peer_data.get_addrs())
        raise PeerStoreError("peer ID not found")
    def get_protocols(self, peer_id: ID) -> List[str]:
        """
        :param peer_id: peer ID to get protocols for
        :return: protocols (as list of strings)
        :raise PeerStoreError: if peer ID not found
        """
        if peer_id in self.peer_data_map:
            return self.peer_data_map[peer_id].get_protocols()
        raise PeerStoreError("peer ID not found")
    def add_protocols(self, peer_id: ID, protocols: Sequence[str]) -> None:
        """
        :param peer_id: peer ID to add protocols for
        :param protocols: protocols to add
        """
        # Creates the peer entry if it does not exist yet (defaultdict).
        peer_data = self.peer_data_map[peer_id]
        peer_data.add_protocols(list(protocols))
    def set_protocols(self, peer_id: ID, protocols: Sequence[str]) -> None:
        """
        :param peer_id: peer ID to set protocols for
        :param protocols: protocols to set (replaces any existing list)
        """
        peer_data = self.peer_data_map[peer_id]
        peer_data.set_protocols(list(protocols))
    def peer_ids(self) -> List[ID]:
        """
        :return: all of the peer IDs stored in peer store
        """
        return list(self.peer_data_map.keys())
    def get(self, peer_id: ID, key: str) -> Any:
        """
        :param peer_id: peer ID to get peer data for
        :param key: the key to search value for
        :return: value corresponding to the key
        :raise PeerStoreError: if peer ID or value not found
        """
        if peer_id in self.peer_data_map:
            try:
                val = self.peer_data_map[peer_id].get_metadata(key)
            except PeerDataError as error:
                raise PeerStoreError() from error
            return val
        raise PeerStoreError("peer ID not found")
    def put(self, peer_id: ID, key: str, val: Any) -> None:
        """
        :param peer_id: peer ID to put peer data for
        :param key: metadata key
        :param val: metadata value
        """
        peer_data = self.peer_data_map[peer_id]
        peer_data.put_metadata(key, val)
    def add_addr(self, peer_id: ID, addr: Multiaddr, ttl: int) -> None:
        """
        :param peer_id: peer ID to add address for
        :param addr: address to add
        :param ttl: time-to-live for this record (currently ignored)
        """
        self.add_addrs(peer_id, [addr], ttl)
    def add_addrs(self, peer_id: ID, addrs: Sequence[Multiaddr], ttl: int) -> None:
        """
        :param peer_id: peer ID to add address for
        :param addrs: addresses to add
        :param ttl: time-to-live for this record (currently ignored)
        """
        # Ignore ttl for now
        peer_data = self.peer_data_map[peer_id]
        peer_data.add_addrs(list(addrs))
    def addrs(self, peer_id: ID) -> List[Multiaddr]:
        """
        :param peer_id: peer ID to get addrs for
        :return: list of addrs
        :raise PeerStoreError: if peer ID not found
        """
        if peer_id in self.peer_data_map:
            return self.peer_data_map[peer_id].get_addrs()
        raise PeerStoreError("peer ID not found")
    def clear_addrs(self, peer_id: ID) -> None:
        """
        :param peer_id: peer ID to clear addrs for
        """
        # Only clear addresses if the peer is in peer map
        # (avoids creating an empty entry via the defaultdict).
        if peer_id in self.peer_data_map:
            self.peer_data_map[peer_id].clear_addrs()
    def peers_with_addrs(self) -> List[ID]:
        """
        :return: all of the peer IDs which have at least one addr stored
        """
        # Add all peers with addrs at least 1 to output
        output: List[ID] = []
        for peer_id in self.peer_data_map:
            if len(self.peer_data_map[peer_id].get_addrs()) >= 1:
                output.append(peer_id)
        return output
    def add_pubkey(self, peer_id: ID, pubkey: PublicKey) -> None:
        """
        :param peer_id: peer ID to add public key for
        :param pubkey: public key to store
        :raise PeerStoreError: if peer ID and pubkey does not match
        """
        # NOTE(review): the defaultdict entry is created before the match
        # check, so a failed add still leaves an (empty) entry behind.
        peer_data = self.peer_data_map[peer_id]
        if ID.from_pubkey(pubkey) != peer_id:
            raise PeerStoreError("peer ID and pubkey does not match")
        peer_data.add_pubkey(pubkey)
    def pubkey(self, peer_id: ID) -> PublicKey:
        """
        :param peer_id: peer ID to get public key for
        :return: public key of the peer
        :raise PeerStoreError: if peer ID or peer pubkey not found
        """
        if peer_id in self.peer_data_map:
            peer_data = self.peer_data_map[peer_id]
            try:
                pubkey = peer_data.get_pubkey()
            except PeerDataError as e:
                raise PeerStoreError("peer pubkey not found") from e
            return pubkey
        raise PeerStoreError("peer ID not found")
    def add_privkey(self, peer_id: ID, privkey: PrivateKey) -> None:
        """
        :param peer_id: peer ID to add private key for
        :param privkey: private key to store
        :raise PeerStoreError: if peer ID and privkey does not match
        """
        peer_data = self.peer_data_map[peer_id]
        if ID.from_pubkey(privkey.get_public_key()) != peer_id:
            raise PeerStoreError("peer ID and privkey does not match")
        peer_data.add_privkey(privkey)
    def privkey(self, peer_id: ID) -> PrivateKey:
        """
        :param peer_id: peer ID to get private key for
        :return: private key of the peer
        :raise PeerStoreError: if peer ID or peer privkey not found
        """
        if peer_id in self.peer_data_map:
            peer_data = self.peer_data_map[peer_id]
            try:
                privkey = peer_data.get_privkey()
            except PeerDataError as e:
                raise PeerStoreError("peer privkey not found") from e
            return privkey
        raise PeerStoreError("peer ID not found")
    def add_key_pair(self, peer_id: ID, key_pair: KeyPair) -> None:
        """
        :param peer_id: peer ID to add the key pair for
        :param key_pair: public/private key pair to store
        :raise PeerStoreError: if either key does not match the peer ID
        """
        self.add_pubkey(peer_id, key_pair.public_key)
        self.add_privkey(peer_id, key_pair.private_key)
class PeerStoreError(KeyError):
    """Raised when a peer ID, or a requested datum for it, is not found in the peer store."""
|
from dataclasses import dataclass, field
from typing import Optional
# XML namespace of this schema binding.
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-integer-maxExclusive-5-NS"
@dataclass
class NistschemaSvIvAtomicIntegerMaxExclusive5:
    """Binding for the NIST 'atomic integer maxExclusive' test schema, variant 5.

    Holds a single required integer constrained (by the serializer, via the
    field metadata) to be strictly less than 999999999999999999.
    NOTE(review): appears to be xsdata-generated code -- confirm before hand-editing.
    """
    class Meta:
        # XML element name and namespace used by the (de)serializer.
        name = "NISTSchema-SV-IV-atomic-integer-maxExclusive-5"
        namespace = "NISTSchema-SV-IV-atomic-integer-maxExclusive-5-NS"
    value: Optional[int] = field(
        default=None,
        metadata={
            "required": True,
            "max_exclusive": 999999999999999999,
        }
    )
|
import nuke
import contextlib
from avalon import api, io
from pype.api import get_current_project_settings
from pype.hosts.nuke.api.lib import (
get_imageio_input_colorspace
)
@contextlib.contextmanager
def preserve_trim(node):
    """Preserve the relative trim of the Loader tool.

    Captures the Read node's frame-mode setting ("start at" / "offset") and
    its frame value before the context body runs, then reapplies it relative
    to the current script start frame afterwards.

    :param node: a Nuke Read node whose 'frame_mode'/'frame' knobs are preserved
    """
    # working script frame range
    script_start = nuke.root()["first_frame"].value()
    start_at_frame = None
    offset_frame = None
    if node['frame_mode'].value() == "start at":
        start_at_frame = node['frame'].value()
    if node['frame_mode'].value() == "offset":
        offset_frame = node['frame'].value()
    try:
        yield
    finally:
        # NOTE(review): truthiness means a stored frame value of 0 is treated
        # as "nothing to restore" -- confirm frame 0 is never a valid trim.
        if start_at_frame:
            node['frame_mode'].setValue("start at")
            node['frame'].setValue(str(script_start))
            # Fix: the message previously ran the value straight into "to".
            print("start frame of Read was set to "
                  "{}".format(script_start))
        if offset_frame:
            node['frame_mode'].setValue("offset")
            node['frame'].setValue(str((script_start + offset_frame)))
            # Fix: report the value actually set (start + offset); the old
            # message printed the bare script start.
            print("start frame of Read was set to "
                  "{}".format(script_start + offset_frame))
def add_review_presets_config():
    """Collect review families and representation names from the project's
    ExtractReview publish profiles.

    :return: dict with 'families' (list) and 'representations' (list)
    """
    settings = get_current_project_settings()
    profiles = settings["global"]["publish"]["ExtractReview"]["profiles"]
    # Later profiles override earlier ones for same-named outputs.
    merged_outputs = {}
    for profile in profiles:
        merged_outputs.update(profile.get("outputs", {}))
    result = {"families": [], "representations": []}
    for output_name, output_def in merged_outputs.items():
        result["representations"].append(output_name)
        result["families"] += output_def.get("families", [])
    return result
class LoadMov(api.Loader):
    """Load mov file into Nuke"""
    # Avalon loader registration: which families/representations this handles.
    families = ["render", "source", "plate", "review"]
    representations = ["mov", "review", "mp4"]
    label = "Load mov"
    order = -10
    icon = "code-fork"
    color = "orange"
    # NOTE(review): evaluated once at class-definition (plugin import) time,
    # not per load -- confirm the script start frame cannot change afterwards.
    script_start = nuke.root()["first_frame"].value()
    # Template for naming the created Read node.
    node_name_template = "{class_name}_{ext}"
    def load(self, context, name, namespace, data):
        """Create a Read node for the representation and containerise it.

        Normalizes the frame range to start at 1, extends the end by the
        handles, and sets the node to "start at" the current script start.
        """
        from avalon.nuke import (
            containerise,
            viewer_update_and_undo_stop
        )
        version = context['version']
        version_data = version.get("data", {})
        repr_id = context["representation"]["_id"]
        orig_first = version_data.get("frameStart")
        orig_last = version_data.get("frameEnd")
        # Shift the range so it starts at frame 1.
        diff = orig_first - 1
        first = orig_first - diff
        last = orig_last - diff
        handle_start = version_data.get("handleStart", 0)
        handle_end = version_data.get("handleEnd", 0)
        colorspace = version_data.get("colorspace")
        repr_cont = context["representation"]["context"]
        self.log.debug(
            "Representation id `{}` ".format(repr_id))
        # NOTE(review): no-op expression -- likely leftover debugging; safe to remove.
        context["representation"]["_id"]
        # create handles offset (only to last, because of mov)
        last += handle_start + handle_end
        # Fallback to asset name when namespace is None
        if namespace is None:
            namespace = context['asset']['name']
        file = self.fname
        if not file:
            self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return
        # Nuke expects forward slashes in file paths.
        file = file.replace("\\", "/")
        name_data = {
            "asset": repr_cont["asset"],
            "subset": repr_cont["subset"],
            "representation": context["representation"]["name"],
            "ext": repr_cont["representation"],
            "id": context["representation"]["_id"],
            "class_name": self.__class__.__name__
        }
        read_name = self.node_name_template.format(**name_data)
        # Create the Loader with the filename path set
        with viewer_update_and_undo_stop():
            read_node = nuke.createNode(
                "Read",
                "name {}".format(read_name)
            )
            read_node["file"].setValue(file)
            read_node["origfirst"].setValue(first)
            read_node["first"].setValue(first)
            read_node["origlast"].setValue(last)
            read_node["last"].setValue(last)
            # start at script start
            read_node['frame_mode'].setValue("start at")
            read_node['frame'].setValue(str(self.script_start))
            if colorspace:
                read_node["colorspace"].setValue(str(colorspace))
            # Project imageio preset (if any) wins over the version colorspace.
            preset_clrsp = get_imageio_input_colorspace(file)
            if preset_clrsp is not None:
                read_node["colorspace"].setValue(preset_clrsp)
            # add additional metadata from the version to imprint Avalon knob
            add_keys = [
                "frameStart", "frameEnd", "handles", "source", "author",
                "fps", "version", "handleStart", "handleEnd"
            ]
            data_imprint = {}
            for key in add_keys:
                if key == 'version':
                    data_imprint.update({
                        key: context["version"]['name']
                    })
                else:
                    data_imprint.update({
                        key: context["version"]['data'].get(key, str(None))
                    })
            data_imprint.update({"objectName": read_name})
            # Green tile: freshly loaded (up-to-date) version.
            read_node["tile_color"].setValue(int("0x4ecd25ff", 16))
            return containerise(
                read_node,
                name=name,
                namespace=namespace,
                context=context,
                loader=self.__class__.__name__,
                data=data_imprint
            )
    def switch(self, container, representation):
        """Switching a representation is just an update."""
        self.update(container, representation)
    def update(self, container, representation):
        """Update the Loader's path
        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs:
        """
        from avalon.nuke import (
            update_container
        )
        read_node = nuke.toNode(container['objectName'])
        assert read_node.Class() == "Read", "Must be Read"
        file = self.fname
        if not file:
            repr_id = representation["_id"]
            self.log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return
        file = file.replace("\\", "/")
        # Get start frame from version data
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })
        # get all versions in list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')
        # Highest existing version, used below to color the node tile.
        max_version = max(versions)
        version_data = version.get("data", {})
        orig_first = version_data.get("frameStart")
        orig_last = version_data.get("frameEnd")
        # NOTE(review): if frameStart is missing this subtraction raises before
        # the `first is None` guard below can run -- the guard looks dead.
        diff = orig_first - 1
        # set first to 1
        first = orig_first - diff
        last = orig_last - diff
        handles = version_data.get("handles", 0)
        handle_start = version_data.get("handleStart", 0)
        handle_end = version_data.get("handleEnd", 0)
        colorspace = version_data.get("colorspace")
        if first is None:
            self.log.warning((
                "Missing start frame for updated version"
                "assuming starts at frame 0 for: "
                "{} ({})").format(
                    read_node['name'].value(), representation))
            first = 0
        # fix handle start and end if none are available
        if not handle_start and not handle_end:
            handle_start = handles
            handle_end = handles
        # create handles offset (only to last, because of mov)
        last += handle_start + handle_end
        # Update the loader's path whilst preserving some values
        with preserve_trim(read_node):
            read_node["file"].setValue(file)
            self.log.info("__ node['file']: {}".format(
                read_node["file"].value()))
        # Set the global in to the start frame of the sequence
        read_node["origfirst"].setValue(first)
        read_node["first"].setValue(first)
        read_node["origlast"].setValue(last)
        read_node["last"].setValue(last)
        # start at script start
        read_node['frame_mode'].setValue("start at")
        read_node['frame'].setValue(str(self.script_start))
        if colorspace:
            read_node["colorspace"].setValue(str(colorspace))
        preset_clrsp = get_imageio_input_colorspace(file)
        if preset_clrsp is not None:
            read_node["colorspace"].setValue(preset_clrsp)
        updated_dict = {}
        updated_dict.update({
            "representation": str(representation["_id"]),
            "frameStart": str(first),
            "frameEnd": str(last),
            "version": str(version.get("name")),
            "colorspace": version_data.get("colorspace"),
            "source": version_data.get("source"),
            "handleStart": str(handle_start),
            "handleEnd": str(handle_end),
            "fps": str(version_data.get("fps")),
            "author": version_data.get("author"),
            "outputDir": version_data.get("outputDir")
        })
        # change color of node
        # Red-ish tile when an outdated version is loaded, green when latest.
        if version.get("name") not in [max_version]:
            read_node["tile_color"].setValue(int("0xd84f20ff", 16))
        else:
            read_node["tile_color"].setValue(int("0x4ecd25ff", 16))
        # Update the imprinted representation
        update_container(
            read_node, updated_dict
        )
        # NOTE(review): "udated" is a typo in the log message.
        self.log.info("udated to version: {}".format(version.get("name")))
    def remove(self, container):
        """Delete the container's Read node from the script."""
        from avalon.nuke import viewer_update_and_undo_stop
        node = nuke.toNode(container['objectName'])
        assert node.Class() == "Read", "Must be Read"
        with viewer_update_and_undo_stop():
            nuke.delete(node)
|
#!/usr/bin/env python3
"""Runs `flake8`."""
import os
import subprocess
import sys
# Repository root: one level up from this script's directory.
WORKING_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '../'))
# Run flake8 via its pytest plugin, selecting only flake8-marked checks.
FLAKE8_COMMAND = [sys.executable, '-m', 'pytest', '-v', '--flake8', '-m',
                  'flake8']
def main():
    """Run the flake8 lint command from the repository root.

    Raises subprocess.CalledProcessError when lint fails.
    """
    os.chdir(WORKING_DIR)
    subprocess.check_call(FLAKE8_COMMAND)


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
"""Main module."""
import re
from .vorwahlen import vorwahlen
def split(number):
    """Split a German phone number into (prefix, area_code, number, info).

    :param number: free-form number string; non-digits (except '+') are dropped
    :return: tuple of (dialing prefix "0" or "", area code, subscriber number,
             area info from `vorwahlen`), or None when no known area code matches
    :raise ValueError: when a "final" number (e.g. a short code) has surplus digits
    """
    # Keep only digits and '+'; this normalizes spaces, dashes, parentheses.
    nr_ = re.sub("[^0-9+]", "", number)
    if nr_.startswith("+49"):
        nr_ = nr_[3:]
    elif nr_.startswith("0049"):
        # Fix: only strip the international prefix when it actually is 00-49.
        # Previously any "00..." number had four digits cut, so e.g. foreign
        # "0033..." numbers were silently misparsed as German.
        nr_ = nr_[4:]
    # it COULD have been: "+49(0)221...."
    if nr_.startswith("0"):
        nr_ = nr_[1:]
    phone_number = nr_
    # Area codes are 2-5 digits; try the longest candidate first.
    for cnt in range(5, 1, -1):
        area_code = nr_[:cnt]
        if area_code in vorwahlen:
            info = vorwahlen[area_code]
            if info.add_digits:
                # Some codes absorb extra digits into the area code.
                area_code = nr_[:(cnt + info.add_digits)]
            phone_number = nr_[(cnt + info.add_digits):]
            if info.is_final:
                # "Final" entries are complete numbers by themselves.
                if phone_number:
                    raise ValueError("Surplus digits: " + phone_number)
                phone_number = area_code
                area_code = ""
            break
    else:
        # No known area code matched.
        return None
    return (
        "0" if not info.is_final else "",
        area_code,
        phone_number,
        info.info,
    )
def format_split(info_tuple, country=True, leading_zero=True):
    """Render a (prefix, area_code, number, info) tuple as a display string.

    :param info_tuple: as returned by split()
    :param country: prepend the "+49" country code
    :param leading_zero: with country, show the trunk prefix as "(0)"
    :raise RuntimeError: on inconsistent tuples (prefix without area/number)
    """
    prefix, area_code, phone_number, info = info_tuple
    if country:
        rv = "+49 (" + prefix + ")" if (leading_zero and prefix) else "+49 "
    elif prefix:
        rv = prefix
    else:
        rv = ""
    if not prefix:
        if area_code:
            return rv + area_code + " " + phone_number
        return rv + phone_number
    # With a prefix, both an area code and a subscriber number are mandatory.
    if not area_code:
        raise RuntimeError("Invalid state - prefix but NO area_code defined - " + str(info_tuple))
    if not phone_number:
        raise RuntimeError("Invalid state - prefix but ONLY area code defined - " + str(info_tuple))
    return rv + area_code + " " + phone_number
def format(number, country=True, leading_zero=True):
    """Parse *number* and format it for display (see split / format_split)."""
    parts = split(number)
    return format_split(parts, country=country, leading_zero=leading_zero)
|
# pylint: disable-msg=E1101,W0612
import operator
import pytest
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, bdate_range, Panel
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.tseries.offsets import BDay
from pandas.util import testing as tm
from pandas.compat import lrange
from pandas import compat
from pandas.core.sparse import frame as spf
from pandas._libs.sparse import BlockIndex, IntIndex
from pandas.core.sparse.api import (
SparseSeries, SparseDataFrame, SparseArray, SparseDtype
)
from pandas.tests.frame.test_api import SharedWithSparse
class TestSparseDataFrame(SharedWithSparse):
klass = SparseDataFrame
# SharedWithSparse tests use generic, klass-agnostic assertion
_assert_frame_equal = staticmethod(tm.assert_sp_frame_equal)
_assert_series_equal = staticmethod(tm.assert_sp_series_equal)
    def test_iterrows(self, float_frame, float_string_frame):
        """iterrows yields each row as a sparse Series equal to .loc[label]."""
        # Same as parent, but we don't ensure the sparse kind is the same.
        for k, v in float_frame.iterrows():
            exp = float_frame.loc[k]
            tm.assert_sp_series_equal(v, exp, check_kind=False)
        for k, v in float_string_frame.iterrows():
            exp = float_string_frame.loc[k]
            tm.assert_sp_series_equal(v, exp, check_kind=False)
    def test_itertuples(self, float_frame):
        """itertuples round-trips each row through the sliced constructor."""
        for i, tup in enumerate(float_frame.itertuples()):
            # tup[0] is the index label; the remaining entries are row values.
            s = self.klass._constructor_sliced(tup[1:])
            s.name = tup[0]
            expected = float_frame.iloc[i, :].reset_index(drop=True)
            tm.assert_sp_series_equal(s, expected, check_kind=False)
    def test_fill_value_when_combine_const(self):
        """add(const, fill_value=0) treats sparse holes (NaN) as 0."""
        # GH12723
        dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
        df = SparseDataFrame({'foo': dat}, index=range(6))
        exp = df.fillna(0).add(2)
        res = df.add(2, fill_value=0)
        tm.assert_sp_frame_equal(res, exp)
def test_values(self, empty_frame, float_frame):
empty = empty_frame.values
assert empty.shape == (0, 0)
no_cols = SparseDataFrame(index=np.arange(10))
mat = no_cols.values
assert mat.shape == (10, 0)
no_index = SparseDataFrame(columns=np.arange(10))
mat = no_index.values
assert mat.shape == (0, 10)
def test_copy(self, float_frame):
cp = float_frame.copy()
assert isinstance(cp, SparseDataFrame)
tm.assert_sp_frame_equal(cp, float_frame)
# as of v0.15.0
# this is now identical (but not is_a )
assert cp.index.identical(float_frame.index)
    def test_constructor(self, float_frame, float_frame_int_kind,
                         float_frame_fill0):
        """Constructor behavior: column types, fill values, empty frames,
        nested dicts, and reindexing through the constructor."""
        # every column of a SparseDataFrame is a SparseSeries
        for col, series in compat.iteritems(float_frame):
            assert isinstance(series, SparseSeries)
        assert isinstance(float_frame_int_kind['A'].sp_index, IntIndex)
        # constructed zframe from matrix above
        assert float_frame_fill0['A'].fill_value == 0
        # XXX: changed asarray
        expected = pd.SparseArray([0, 0, 0, 0, 1., 2., 3., 4., 5., 6.],
                                  fill_value=0, kind='block')
        tm.assert_sp_array_equal(expected,
                                 float_frame_fill0['A'].values)
        tm.assert_numpy_array_equal(np.array([0., 0., 0., 0., 1., 2.,
                                              3., 4., 5., 6.]),
                                    float_frame_fill0['A'].to_dense().values)
        # construct no data
        sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
        for col, series in compat.iteritems(sdf):
            assert isinstance(series, SparseSeries)
        # construct from nested dict
        data = {}
        for c, s in compat.iteritems(float_frame):
            data[c] = s.to_dict()
        sdf = SparseDataFrame(data)
        tm.assert_sp_frame_equal(sdf, float_frame)
        # TODO: test data is copied from inputs
        # init dict with different index
        idx = float_frame.index[:5]
        cons = SparseDataFrame(
            float_frame, index=idx, columns=float_frame.columns,
            default_fill_value=float_frame.default_fill_value,
            default_kind=float_frame.default_kind, copy=True)
        reindexed = float_frame.reindex(idx)
        tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False)
        # assert level parameter breaks reindex
        with pytest.raises(TypeError):
            float_frame.reindex(idx, level=0)
        # repr must not raise
        repr(float_frame)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {'b': [2, 3], 'a': [0, 1]}
frame = SparseDataFrame(data=d)
if compat.PY36:
expected = SparseDataFrame(data=d, columns=list('ba'))
else:
expected = SparseDataFrame(data=d, columns=list('ab'))
tm.assert_sp_frame_equal(frame, expected)
    def test_constructor_ndarray(self, float_frame):
        """Construction from raw ndarrays: 2d, 1d, and length-mismatch errors."""
        # no index or columns
        sp = SparseDataFrame(float_frame.values)
        # 1d
        sp = SparseDataFrame(float_frame['A'].values, index=float_frame.index,
                             columns=['A'])
        tm.assert_sp_frame_equal(sp, float_frame.reindex(columns=['A']))
        # raise on level argument
        pytest.raises(TypeError, float_frame.reindex, columns=['A'],
                      level=1)
        # wrong length index / columns
        with tm.assert_raises_regex(ValueError, "^Index length"):
            SparseDataFrame(float_frame.values, index=float_frame.index[:-1])
        with tm.assert_raises_regex(ValueError, "^Column length"):
            SparseDataFrame(float_frame.values,
                            columns=float_frame.columns[:-1])
        # GH 9272
def test_constructor_empty(self):
sp = SparseDataFrame()
assert len(sp.index) == 0
assert len(sp.columns) == 0
def test_constructor_dataframe(self, float_frame):
dense = float_frame.to_dense()
sp = SparseDataFrame(dense)
tm.assert_sp_frame_equal(sp, float_frame)
def test_constructor_convert_index_once(self):
arr = np.array([1.5, 2.5, 3.5])
sdf = SparseDataFrame(columns=lrange(4), index=arr)
assert sdf[0].index is sdf[1].index
    def test_constructor_from_series(self):
        """GH 2873: building a SparseDataFrame from SparseSeries inputs.

        The commented-out constructions document known failures kept for
        reference; only the single-series path is asserted.
        """
        x = Series(np.random.randn(10000), name='a')
        x = x.to_sparse(fill_value=0)
        assert isinstance(x, SparseSeries)
        df = SparseDataFrame(x)
        assert isinstance(df, SparseDataFrame)
        x = Series(np.random.randn(10000), name='a')
        y = Series(np.random.randn(10000), name='b')
        x2 = x.astype(float)
        x2.loc[:9998] = np.NaN
        # TODO: x_sparse is unused...fix
        x_sparse = x2.to_sparse(fill_value=np.NaN)  # noqa
        # Currently fails too with weird ufunc error
        # df1 = SparseDataFrame([x_sparse, y])
        y.loc[:9998] = 0
        # TODO: y_sparse is unused...fix
        y_sparse = y.to_sparse(fill_value=0)  # noqa
        # without sparse value raises error
        # df2 = SparseDataFrame([x2_sparse, y])
def test_constructor_from_dense_series(self):
# GH 19393
# series with name
x = Series(np.random.randn(10000), name='a')
result = SparseDataFrame(x)
expected = x.to_frame().to_sparse()
tm.assert_sp_frame_equal(result, expected)
# series with no name
x = Series(np.random.randn(10000))
result = SparseDataFrame(x)
expected = x.to_frame().to_sparse()
tm.assert_sp_frame_equal(result, expected)
    def test_constructor_from_unknown_type(self):
        """GH 19393: an unsupported data type raises TypeError."""
        class Unknown(object):
            pass
        # NOTE(review): `message=` only customizes pytest's failure message;
        # it does not assert on the exception text.
        with pytest.raises(TypeError,
                           message='SparseDataFrame called with unknown type '
                           '"Unknown" for data argument'):
            SparseDataFrame(Unknown())
def test_constructor_preserve_attr(self):
# GH 13866
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
df = pd.SparseDataFrame({'x': arr})
assert df['x'].dtype == SparseDtype(np.int64)
assert df['x'].fill_value == 0
s = pd.SparseSeries(arr, name='x')
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
df = pd.SparseDataFrame(s)
assert df['x'].dtype == SparseDtype(np.int64)
assert df['x'].fill_value == 0
df = pd.SparseDataFrame({'x': s})
assert df['x'].dtype == SparseDtype(np.int64)
assert df['x'].fill_value == 0
    def test_constructor_nan_dataframe(self):
        """GH 10079: an all-NaN MultiIndex frame round-trips through to_sparse."""
        trains = np.arange(100)
        thresholds = [10, 20, 30, 40, 50, 60]
        # Cartesian product of train x threshold as a 2-level index.
        tuples = [(i, j) for i in trains for j in thresholds]
        index = pd.MultiIndex.from_tuples(tuples,
                                          names=['trains', 'thresholds'])
        matrix = np.empty((len(index), len(trains)))
        matrix.fill(np.nan)
        df = pd.DataFrame(matrix, index=index, columns=trains, dtype=float)
        result = df.to_sparse()
        expected = pd.SparseDataFrame(matrix, index=index, columns=trains,
                                      dtype=float)
        tm.assert_sp_frame_equal(result, expected)
    def test_type_coercion_at_construction(self):
        """GH 15682: dtype= at construction coerces all columns (uint8 here)."""
        result = pd.SparseDataFrame(
            {'a': [1, 0, 0], 'b': [0, 1, 0], 'c': [0, 0, 1]}, dtype='uint8',
            default_fill_value=0)
        expected = pd.SparseDataFrame(
            {'a': pd.SparseSeries([1, 0, 0], dtype='uint8'),
             'b': pd.SparseSeries([0, 1, 0], dtype='uint8'),
             'c': pd.SparseSeries([0, 0, 1], dtype='uint8')},
            default_fill_value=0)
        tm.assert_sp_frame_equal(result, expected)
    def test_dtypes(self):
        """get_dtype_counts on a mostly-NaN sparse frame reports every column
        as 'Sparse[float64, nan]'."""
        df = DataFrame(np.random.randn(10000, 4))
        df.loc[:9998] = np.nan
        sdf = df.to_sparse()
        result = sdf.get_dtype_counts()
        expected = Series({'Sparse[float64, nan]': 4})
        tm.assert_series_equal(result, expected)
    def test_shape(self, float_frame, float_frame_int_kind,
                   float_frame_fill0, float_frame_fill2):
        """``.shape`` is consistent across kind/fill-value fixture variants."""
        # see gh-10452
        assert float_frame.shape == (10, 4)
        assert float_frame_int_kind.shape == (10, 4)
        assert float_frame_fill0.shape == (10, 4)
        assert float_frame_fill2.shape == (10, 4)
    def test_str(self):
        """Smoke test: ``str()`` of a large mostly-NaN sparse frame must not
        raise (result content is not checked)."""
        df = DataFrame(np.random.randn(10000, 4))
        df.loc[:9998] = np.nan
        sdf = df.to_sparse()
        str(sdf)
    def test_array_interface(self, float_frame):
        """NumPy ufuncs (np.sqrt) applied to a sparse frame match the same
        ufunc applied to its dense counterpart."""
        res = np.sqrt(float_frame)
        dres = np.sqrt(float_frame.to_dense())
        tm.assert_frame_equal(res.to_dense(), dres)
    def test_pickle(self, float_frame, float_frame_int_kind, float_frame_dense,
                    float_frame_fill0, float_frame_fill0_dense,
                    float_frame_fill2, float_frame_fill2_dense):
        """Pickle round-trip preserves sparse frames (and their dense form)
        for every fixture variant, including an empty frame."""
        def _test_roundtrip(frame, orig):
            # round_trip_pickle = pickle.dumps followed by pickle.loads
            result = tm.round_trip_pickle(frame)
            tm.assert_sp_frame_equal(frame, result)
            tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False)
        _test_roundtrip(SparseDataFrame(), DataFrame())
        _test_roundtrip(float_frame, float_frame_dense)
        _test_roundtrip(float_frame_int_kind, float_frame_dense)
        _test_roundtrip(float_frame_fill0, float_frame_fill0_dense)
        _test_roundtrip(float_frame_fill2, float_frame_fill2_dense)
    def test_dense_to_sparse(self):
        """``DataFrame.to_sparse`` produces a SparseDataFrame with the
        requested index kind / fill value and round-trips via to_dense."""
        df = DataFrame({'A': [nan, nan, nan, 1, 2],
                        'B': [1, 2, nan, nan, nan]})
        sdf = df.to_sparse()
        assert isinstance(sdf, SparseDataFrame)
        # default fill value is NaN, default index kind is block
        assert np.isnan(sdf.default_fill_value)
        assert isinstance(sdf['A'].sp_index, BlockIndex)
        tm.assert_frame_equal(sdf.to_dense(), df)
        sdf = df.to_sparse(kind='integer')
        assert isinstance(sdf['A'].sp_index, IntIndex)
        df = DataFrame({'A': [0, 0, 0, 1, 2],
                        'B': [1, 2, 0, 0, 0]}, dtype=float)
        sdf = df.to_sparse(fill_value=0)
        assert sdf.default_fill_value == 0
        tm.assert_frame_equal(sdf.to_dense(), df)
    def test_density(self):
        """``density`` is the fraction of non-fill (stored) values."""
        # 7 of 10 values are non-NaN
        df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
        assert df.density == 0.7
        # 30 of 40 values are non-NaN across four columns
        df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
                              'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
                              'C': np.arange(10),
                              'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]})
        assert df.density == 0.75
def test_sparse_to_dense(self):
pass
    def test_sparse_series_ops(self, float_frame):
        """Arithmetic ops on the default (block-kind, NaN-fill) fixture."""
        self._check_frame_ops(float_frame)
    def test_sparse_series_ops_i(self, float_frame_int_kind):
        """Arithmetic ops on the integer-kind index fixture."""
        self._check_frame_ops(float_frame_int_kind)
    def test_sparse_series_ops_z(self, float_frame_fill0):
        """Arithmetic ops on the zero-fill-value fixture."""
        self._check_frame_ops(float_frame_fill0)
    def test_sparse_series_ops_fill(self, float_frame_fill2):
        """Arithmetic ops on the fill-value-2 fixture."""
        self._check_frame_ops(float_frame_fill2)
    def _check_frame_ops(self, frame):
        """Shared driver: verify frame/frame, frame/series and frame/scalar
        arithmetic on a sparse frame matches the dense equivalents."""
        def _compare_to_dense(a, b, da, db, op):
            # op(a, b) on sparse inputs must equal op(da, db) on the dense
            # versions, once the dense result is re-sparsified with the same
            # fill value the sparse op produced.
            sparse_result = op(a, b)
            dense_result = op(da, db)
            fill = sparse_result.default_fill_value
            dense_result = dense_result.to_sparse(fill_value=fill)
            tm.assert_sp_frame_equal(sparse_result, dense_result,
                                     exact_indices=False)
            # mixed sparse/dense operands should still yield a sparse frame
            if isinstance(a, DataFrame) and isinstance(db, DataFrame):
                mixed_result = op(a, db)
                assert isinstance(mixed_result, SparseDataFrame)
                tm.assert_sp_frame_equal(mixed_result, sparse_result,
                                         exact_indices=False)
        opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv']
        ops = [getattr(operator, name) for name in opnames]
        fidx = frame.index
        # time series operations
        series = [frame['A'], frame['B'], frame['C'], frame['D'],
                  frame['A'].reindex(fidx[:7]), frame['A'].reindex(fidx[::2]),
                  SparseSeries(
                      [], index=[])]
        for op in opnames:
            _compare_to_dense(frame, frame[::2], frame.to_dense(),
                              frame[::2].to_dense(), getattr(operator, op))
            # 2304, no auto-broadcasting
            # NOTE: ``i`` is unused; enumerate kept for parity with upstream.
            for i, s in enumerate(series):
                # bind the op by name so the axis='index' keyword is used
                f = lambda a, b: getattr(a, op)(b, axis='index')
                _compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f)
                # rops are not implemented
                # _compare_to_dense(s, frame, s.to_dense(),
                #                   frame.to_dense(), f)
        # cross-sectional operations
        series = [frame.xs(fidx[0]), frame.xs(fidx[3]), frame.xs(fidx[5]),
                  frame.xs(fidx[7]), frame.xs(fidx[5])[:2]]
        for op in ops:
            for s in series:
                _compare_to_dense(frame, s, frame.to_dense(), s, op)
                _compare_to_dense(s, frame, s, frame.to_dense(), op)
        # it works!
        result = frame + frame.loc[:, ['A', 'B']]  # noqa
    def test_op_corners(self, float_frame, empty_frame):
        """Ops with an empty sparse frame: empty+empty stays empty; adding an
        empty frame to a populated one yields an all-NaN result."""
        empty = empty_frame + empty_frame
        assert empty.empty
        foo = float_frame + empty_frame
        assert isinstance(foo.index, DatetimeIndex)
        tm.assert_frame_equal(foo, float_frame * np.nan)
        foo = empty_frame + float_frame
        tm.assert_frame_equal(foo, float_frame * np.nan)
def test_scalar_ops(self):
pass
    def test_getitem(self):
        """Selecting a list of columns equals reindex(columns=...); unknown
        column names raise."""
        # 1585 select multiple columns
        sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c'])
        result = sdf[['a', 'b']]
        exp = sdf.reindex(columns=['a', 'b'])
        tm.assert_sp_frame_equal(result, exp)
        # 'd' is not a column -> error
        pytest.raises(Exception, sdf.__getitem__, ['a', 'd'])
    def test_iloc(self, float_frame):
        """iloc column selection returns a SparseSeries and preserves the
        frame's sparse index kind."""
        # GH 2227
        result = float_frame.iloc[:, 0]
        assert isinstance(result, SparseSeries)
        tm.assert_sp_series_equal(result, float_frame['A'])
        # preserve sparse index type. #2251
        data = {'A': [0, 1]}
        iframe = SparseDataFrame(data, default_kind='integer')
        tm.assert_class_equal(iframe['A'].sp_index,
                              iframe.iloc[:, 0].sp_index)
    def test_set_value(self, float_frame):
        """Deprecated ``set_value``/``get_value``: each call warns, returns a
        new frame (not in-place) and can enlarge both index and columns."""
        # ok, as the index gets converted to object
        frame = float_frame.copy()
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            res = frame.set_value('foobar', 'B', 1.5)
        assert res.index.dtype == 'object'
        res = float_frame
        res.index = res.index.astype(object)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            res = float_frame.set_value('foobar', 'B', 1.5)
        # set_value returns a new object rather than mutating
        assert res is not float_frame
        assert res.index[-1] == 'foobar'
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            assert res.get_value('foobar', 'B') == 1.5
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            res2 = res.set_value('foobar', 'qux', 1.5)
        assert res2 is not res
        # setting an unknown column appends it
        tm.assert_index_equal(res2.columns,
                              pd.Index(list(float_frame.columns) + ['qux']))
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            assert res2.get_value('foobar', 'qux') == 1.5
    def test_fancy_index_misc(self, float_frame):
        """iloc slicing along each axis equals the corresponding reindex."""
        # axis = 0
        sliced = float_frame.iloc[-2:, :]
        expected = float_frame.reindex(index=float_frame.index[-2:])
        tm.assert_sp_frame_equal(sliced, expected)
        # axis = 1
        sliced = float_frame.iloc[:, -2:]
        expected = float_frame.reindex(columns=float_frame.columns[-2:])
        tm.assert_sp_frame_equal(sliced, expected)
    def test_getitem_overload(self, float_frame):
        """__getitem__ supports slices and boolean masks; a mask of the wrong
        length raises."""
        # slicing
        sl = float_frame[:20]
        tm.assert_sp_frame_equal(sl,
                                 float_frame.reindex(float_frame.index[:20]))
        # boolean indexing
        d = float_frame.index[5]
        indexer = float_frame.index > d
        subindex = float_frame.index[indexer]
        subframe = float_frame[indexer]
        tm.assert_index_equal(subindex, subframe.index)
        # mask shorter than the index -> error
        pytest.raises(Exception, float_frame.__getitem__, indexer[:-1])
    def test_setitem(self, float_frame, float_frame_int_kind,
                     float_frame_dense,
                     float_frame_fill0, float_frame_fill0_dense,
                     float_frame_fill2, float_frame_fill2_dense):
        """Column assignment accepts SparseSeries, dense Series (aligned or
        not), ndarrays and scalars; inserted columns come back sparse."""
        def _check_frame(frame, orig):
            N = len(frame)
            # insert SparseSeries
            frame['E'] = frame['A']
            assert isinstance(frame['E'], SparseSeries)
            tm.assert_sp_series_equal(frame['E'], frame['A'],
                                      check_names=False)
            # insert SparseSeries differently-indexed: reindexed to the frame
            to_insert = frame['A'][::2]
            frame['E'] = to_insert
            expected = to_insert.to_dense().reindex(frame.index)
            result = frame['E'].to_dense()
            tm.assert_series_equal(result, expected, check_names=False)
            assert result.name == 'E'
            # insert Series: converted to sparse on the way in
            frame['F'] = frame['A'].to_dense()
            assert isinstance(frame['F'], SparseSeries)
            tm.assert_sp_series_equal(frame['F'], frame['A'],
                                      check_names=False)
            # insert Series differently-indexed
            to_insert = frame['A'].to_dense()[::2]
            frame['G'] = to_insert
            expected = to_insert.reindex(frame.index)
            expected.name = 'G'
            tm.assert_series_equal(frame['G'].to_dense(), expected)
            # insert ndarray
            frame['H'] = np.random.randn(N)
            assert isinstance(frame['H'], SparseSeries)
            # half the values equal the fill value -> only half stored
            to_sparsify = np.random.randn(N)
            to_sparsify[N // 2:] = frame.default_fill_value
            frame['I'] = to_sparsify
            assert len(frame['I'].sp_values) == N // 2
            # insert ndarray wrong size -> error
            pytest.raises(Exception, frame.__setitem__, 'foo',
                          np.random.randn(N - 1))
            # scalar value: broadcast, fully stored unless it IS the fill value
            frame['J'] = 5
            assert len(frame['J'].sp_values) == N
            assert (frame['J'].sp_values == 5).all()
            frame['K'] = frame.default_fill_value
            assert len(frame['K'].sp_values) == 0
        _check_frame(float_frame, float_frame_dense)
        _check_frame(float_frame_int_kind, float_frame_dense)
        _check_frame(float_frame_fill0, float_frame_fill0_dense)
        _check_frame(float_frame_fill2, float_frame_fill2_dense)
    @pytest.mark.parametrize('values', [
        [True, False],
        [0, 1],
        [1, None],
        ['a', 'b'],
        [pd.Timestamp('2017'), pd.NaT],
        [pd.Timedelta('10s'), pd.NaT],
    ])
    def test_setitem_more(self, values):
        """Assigning a SparseArray into a dense DataFrame column works for a
        range of dtypes (bool, int, object, datetime, timedelta)."""
        df = pd.DataFrame({"A": values})
        df['A'] = pd.SparseArray(values)
        expected = pd.DataFrame({'A': pd.SparseArray(values)})
        tm.assert_frame_equal(df, expected)
    def test_setitem_corner(self, float_frame):
        """Assigning an existing sparse column under a new name copies it."""
        float_frame['a'] = float_frame['B']
        tm.assert_sp_series_equal(float_frame['a'], float_frame['B'],
                                  check_names=False)
    def test_setitem_array(self, float_frame):
        """Assigning a SparseSeries (full or truncated) as a new column
        aligns it on the frame's index."""
        arr = float_frame['B']
        float_frame['E'] = arr
        tm.assert_sp_series_equal(float_frame['E'], float_frame['B'],
                                  check_names=False)
        # shorter series: aligned, compare on the common index
        float_frame['F'] = arr[:-1]
        index = float_frame.index[:-1]
        tm.assert_sp_series_equal(float_frame['E'].reindex(index),
                                  float_frame['F'].reindex(index),
                                  check_names=False)
    def test_setitem_chained_no_consolidate(self):
        """Chained setitem must not consolidate the sparse blocks."""
        # https://github.com/pandas-dev/pandas/pull/19268
        # issuecomment-361696418
        # chained setitem used to cause consolidation
        sdf = pd.SparseDataFrame([[np.nan, 1], [2, np.nan]])
        with pd.option_context('mode.chained_assignment', None):
            sdf[0][1] = 2
        # still two blocks -> no consolidation happened
        assert len(sdf._data.blocks) == 2
    def test_delitem(self, float_frame):
        """``del frame[col]`` removes only the named column, leaving the
        remaining columns intact."""
        A = float_frame['A']
        C = float_frame['C']
        del float_frame['B']
        assert 'B' not in float_frame
        tm.assert_sp_series_equal(float_frame['A'], A)
        tm.assert_sp_series_equal(float_frame['C'], C)
        del float_frame['D']
        assert 'D' not in float_frame
        del float_frame['A']
        assert 'A' not in float_frame
    def test_set_columns(self, float_frame):
        """Setting ``columns`` to a wrong-length index raises."""
        float_frame.columns = float_frame.columns
        pytest.raises(Exception, setattr, float_frame, 'columns',
                      float_frame.columns[:-1])
    def test_set_index(self, float_frame):
        """Setting ``index`` to a wrong-length index raises."""
        float_frame.index = float_frame.index
        pytest.raises(Exception, setattr, float_frame, 'index',
                      float_frame.index[:-1])
    def test_ctor_reindex(self):
        """Constructing with data shorter than the given index raises."""
        idx = pd.Index([0, 1, 2, 3])
        with tm.assert_raises_regex(ValueError, ''):
            pd.SparseDataFrame({"A": [1, 2]}, index=idx)
    def test_append(self, float_frame):
        """``append`` reassembles split frames; with mismatched columns it
        unions them (warning without ``sort``, ordered per ``sort=``)."""
        # same columns: simple vertical concat
        a = float_frame[:5]
        b = float_frame[5:]
        appended = a.append(b)
        tm.assert_sp_frame_equal(appended, float_frame, exact_indices=False)
        # mismatched columns: warns about future sort behaviour
        a = float_frame.iloc[:5, :3]
        b = float_frame.iloc[5:]
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # Stacklevel is set for pd.concat, not append
            appended = a.append(b)
        tm.assert_sp_frame_equal(appended.iloc[:, :3], float_frame.iloc[:, :3],
                                 exact_indices=False)
        a = a[['B', 'C', 'A']].head(2)
        b = b.head(2)
        expected = pd.SparseDataFrame({
            "B": [0., 1, None, 3],
            "C": [0., 1, 5, 6],
            "A": [None, None, 2, 3],
            "D": [None, None, 5, None],
        }, index=a.index | b.index, columns=['B', 'C', 'A', 'D'])
        # explicit sort= silences the warning either way
        with tm.assert_produces_warning(None):
            appended = a.append(b, sort=False)
        tm.assert_frame_equal(appended, expected)
        with tm.assert_produces_warning(None):
            appended = a.append(b, sort=True)
        tm.assert_sp_frame_equal(appended, expected[['A', 'B', 'C', 'D']],
                                 consolidate_block_indices=True,
                                 check_kind=False)
    def test_astype(self):
        """``astype`` with a plain dtype retains each column's fill value;
        with a SparseDtype it also updates the fill value."""
        sparse = pd.SparseDataFrame({'A': SparseArray([1, 2, 3, 4],
                                                      dtype=np.int64),
                                     'B': SparseArray([4, 5, 6, 7],
                                                      dtype=np.int64)})
        assert sparse['A'].dtype == SparseDtype(np.int64)
        assert sparse['B'].dtype == SparseDtype(np.int64)
        # retain fill_value
        res = sparse.astype(np.float64)
        exp = pd.SparseDataFrame({'A': SparseArray([1., 2., 3., 4.],
                                                   fill_value=0,
                                                   kind='integer'),
                                  'B': SparseArray([4., 5., 6., 7.],
                                                   fill_value=0,
                                                   kind='integer')},
                                 default_fill_value=np.nan)
        tm.assert_sp_frame_equal(res, exp)
        assert res['A'].dtype == SparseDtype(np.float64, 0)
        assert res['B'].dtype == SparseDtype(np.float64, 0)
        # update fill_value
        res = sparse.astype(SparseDtype(np.float64, np.nan))
        exp = pd.SparseDataFrame({'A': SparseArray([1., 2., 3., 4.],
                                                   fill_value=np.nan,
                                                   kind='integer'),
                                  'B': SparseArray([4., 5., 6., 7.],
                                                   fill_value=np.nan,
                                                   kind='integer')},
                                 default_fill_value=np.nan)
        tm.assert_sp_frame_equal(res, exp)
        assert res['A'].dtype == SparseDtype(np.float64, np.nan)
        assert res['B'].dtype == SparseDtype(np.float64, np.nan)
    def test_astype_bool(self):
        """``astype(SparseDtype(bool, False))`` converts int columns to bool
        with a False fill value."""
        sparse = pd.SparseDataFrame({'A': SparseArray([0, 2, 0, 4],
                                                      fill_value=0,
                                                      dtype=np.int64),
                                     'B': SparseArray([0, 5, 0, 7],
                                                      fill_value=0,
                                                      dtype=np.int64)},
                                    default_fill_value=0)
        assert sparse['A'].dtype == SparseDtype(np.int64)
        assert sparse['B'].dtype == SparseDtype(np.int64)
        res = sparse.astype(SparseDtype(bool, False))
        exp = pd.SparseDataFrame({'A': SparseArray([False, True, False, True],
                                                   dtype=np.bool,
                                                   fill_value=False,
                                                   kind='integer'),
                                  'B': SparseArray([False, True, False, True],
                                                   dtype=np.bool,
                                                   fill_value=False,
                                                   kind='integer')},
                                 default_fill_value=False)
        tm.assert_sp_frame_equal(res, exp)
        assert res['A'].dtype == SparseDtype(np.bool)
        assert res['B'].dtype == SparseDtype(np.bool)
def test_fillna(self, float_frame_fill0, float_frame_fill0_dense):
df = float_frame_fill0.reindex(lrange(5))
dense = float_frame_fill0_dense.reindex(lrange(5))
result = df.fillna(0)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
exact_indices=False)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result.fillna(0, inplace=True)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
exact_indices=False)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result = df['A']
result.fillna(0, inplace=True)
expected = dense['A'].fillna(0)
# this changes internal SparseArray repr
# tm.assert_sp_series_equal(result, expected.to_sparse(fill_value=0))
tm.assert_series_equal(result.to_dense(), expected)
    def test_fillna_fill_value(self):
        """``fillna`` with an explicit value works regardless of the frame's
        default fill value (NaN or 0)."""
        df = pd.DataFrame({'A': [1, 0, 0], 'B': [np.nan, np.nan, 4]})
        sparse = pd.SparseDataFrame(df)
        tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
                              df.fillna(-1), check_dtype=False)
        sparse = pd.SparseDataFrame(df, default_fill_value=0)
        tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
                              df.fillna(-1), check_dtype=False)
    def test_sparse_frame_pad_backfill_limit(self):
        """``reindex(method='pad'/'backfill', limit=n)`` fills at most ``n``
        rows; sparse fillna emits a PerformanceWarning."""
        index = np.arange(10)
        df = DataFrame(np.random.randn(10, 4), index=index)
        sdf = df.to_sparse()
        # pad forward, but only 5 rows -> last 3 remain NaN
        result = sdf[:2].reindex(index, method='pad', limit=5)
        with tm.assert_produces_warning(PerformanceWarning):
            expected = sdf[:2].reindex(index).fillna(method='pad')
        expected = expected.to_dense()
        expected.values[-3:] = np.nan
        expected = expected.to_sparse()
        tm.assert_frame_equal(result, expected)
        # backfill, limit 5 -> first 3 remain NaN
        result = sdf[-2:].reindex(index, method='backfill', limit=5)
        with tm.assert_produces_warning(PerformanceWarning):
            expected = sdf[-2:].reindex(index).fillna(method='backfill')
        expected = expected.to_dense()
        expected.values[:3] = np.nan
        expected = expected.to_sparse()
        tm.assert_frame_equal(result, expected)
    def test_sparse_frame_fillna_limit(self):
        """``fillna(method=..., limit=n)`` after a plain reindex matches
        reindex-with-method and respects the limit."""
        index = np.arange(10)
        df = DataFrame(np.random.randn(10, 4), index=index)
        sdf = df.to_sparse()
        result = sdf[:2].reindex(index)
        with tm.assert_produces_warning(PerformanceWarning):
            result = result.fillna(method='pad', limit=5)
        with tm.assert_produces_warning(PerformanceWarning):
            expected = sdf[:2].reindex(index).fillna(method='pad')
        expected = expected.to_dense()
        # limit=5 from 2 filled rows leaves the last 3 NaN
        expected.values[-3:] = np.nan
        expected = expected.to_sparse()
        tm.assert_frame_equal(result, expected)
        result = sdf[-2:].reindex(index)
        with tm.assert_produces_warning(PerformanceWarning):
            result = result.fillna(method='backfill', limit=5)
        with tm.assert_produces_warning(PerformanceWarning):
            expected = sdf[-2:].reindex(index).fillna(method='backfill')
        expected = expected.to_dense()
        expected.values[:3] = np.nan
        expected = expected.to_sparse()
        tm.assert_frame_equal(result, expected)
    def test_rename(self, float_frame):
        """``rename`` with a callable maps index labels (datetime -> str)
        and column labels while preserving sparsity."""
        result = float_frame.rename(index=str)
        expected = SparseDataFrame(float_frame.values,
                                   index=float_frame.index.strftime(
                                       "%Y-%m-%d %H:%M:%S"),
                                   columns=list('ABCD'))
        tm.assert_sp_frame_equal(result, expected)
        # column rename: 'A' -> 'A1', etc.
        result = float_frame.rename(columns=lambda x: '%s%d' % (x, 1))
        data = {'A1': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
                'B1': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
                'C1': np.arange(10, dtype=np.float64),
                'D1': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
        expected = SparseDataFrame(data, index=float_frame.index)
        tm.assert_sp_frame_equal(result, expected)
    def test_corr(self, float_frame):
        """``corr`` on a sparse frame matches the dense correlation."""
        res = float_frame.corr()
        # XXX: this stays sparse
        tm.assert_frame_equal(res, float_frame.to_dense().corr().to_sparse())
    def test_describe(self, float_frame):
        """Smoke test: describe/str/get_dtype_counts must not raise even with
        an all-NaN column present."""
        float_frame['foo'] = np.nan
        float_frame.get_dtype_counts()
        str(float_frame)
        desc = float_frame.describe()  # noqa
    def test_join(self, float_frame):
        """``join`` recombines disjoint column subsets; overlapping columns
        and unnamed Series operands raise."""
        left = float_frame.loc[:, ['A', 'B']]
        right = float_frame.loc[:, ['C', 'D']]
        joined = left.join(right)
        tm.assert_sp_frame_equal(joined, float_frame, exact_indices=False)
        # overlapping column 'B' -> error
        right = float_frame.loc[:, ['B', 'D']]
        pytest.raises(Exception, left.join, right)
        with tm.assert_raises_regex(ValueError,
                                    'Other Series must have a name'):
            float_frame.join(Series(
                np.random.randn(len(float_frame)), index=float_frame.index))
    def test_reindex(self, float_frame, float_frame_int_kind,
                     float_frame_fill0, float_frame_fill2):
        """Row/column reindex matches dense behaviour, preserves fill values,
        and handles zero-length and copy=False cases."""
        def _check_frame(frame):
            index = frame.index
            sidx = index[::2]
            sidx2 = index[:5]  # noqa
            sparse_result = frame.reindex(sidx)
            dense_result = frame.to_dense().reindex(sidx)
            tm.assert_frame_equal(sparse_result.to_dense(), dense_result)
            tm.assert_frame_equal(frame.reindex(list(sidx)).to_dense(),
                                  dense_result)
            # reindexing back to the full index
            sparse_result2 = sparse_result.reindex(index)
            dense_result2 = dense_result.reindex(index)
            tm.assert_frame_equal(sparse_result2.to_dense(), dense_result2)
            # propagate CORRECT fill value
            tm.assert_almost_equal(sparse_result.default_fill_value,
                                   frame.default_fill_value)
            tm.assert_almost_equal(sparse_result['A'].fill_value,
                                   frame['A'].fill_value)
            # length zero
            length_zero = frame.reindex([])
            assert len(length_zero) == 0
            assert len(length_zero.columns) == len(frame.columns)
            assert len(length_zero['A']) == 0
            # frame being reindexed has length zero
            length_n = length_zero.reindex(index)
            assert len(length_n) == len(frame)
            assert len(length_n.columns) == len(frame.columns)
            assert len(length_n['A']) == len(frame)
            # reindex columns: unknown column 'Z' is all fill value
            reindexed = frame.reindex(columns=['A', 'B', 'Z'])
            assert len(reindexed.columns) == 3
            tm.assert_almost_equal(reindexed['Z'].fill_value,
                                   frame.default_fill_value)
            assert np.isnan(reindexed['Z'].sp_values).all()
        _check_frame(float_frame)
        _check_frame(float_frame_int_kind)
        _check_frame(float_frame_fill0)
        _check_frame(float_frame_fill2)
        # with copy=False: result shares data, so insertion is visible
        reindexed = float_frame.reindex(float_frame.index, copy=False)
        reindexed['F'] = reindexed['A']
        assert 'F' in float_frame
        # default copy=True: insertion does NOT leak back
        reindexed = float_frame.reindex(float_frame.index)
        reindexed['G'] = reindexed['A']
        assert 'G' not in float_frame
    def test_reindex_fill_value(self, float_frame_fill0,
                                float_frame_fill0_dense):
        """``reindex(..., fill_value=0)`` on sparse matches dense reindex."""
        rng = bdate_range('20110110', periods=20)
        result = float_frame_fill0.reindex(rng, fill_value=0)
        exp = float_frame_fill0_dense.reindex(rng, fill_value=0)
        exp = exp.to_sparse(float_frame_fill0.default_fill_value)
        tm.assert_sp_frame_equal(result, exp)
    def test_reindex_method(self):
        """Index reindex supports bfill/ffill; column reindex with a fill
        method is not implemented."""
        sparse = SparseDataFrame(data=[[11., 12., 14.],
                                       [21., 22., 24.],
                                       [41., 42., 44.]],
                                 index=[1, 2, 4],
                                 columns=[1, 2, 4],
                                 dtype=float)
        # Over indices
        # default method: missing rows become NaN
        result = sparse.reindex(index=range(6))
        expected = SparseDataFrame(data=[[nan, nan, nan],
                                         [11., 12., 14.],
                                         [21., 22., 24.],
                                         [nan, nan, nan],
                                         [41., 42., 44.],
                                         [nan, nan, nan]],
                                   index=range(6),
                                   columns=[1, 2, 4],
                                   dtype=float)
        tm.assert_sp_frame_equal(result, expected)
        # method='bfill': fill from the next known row
        result = sparse.reindex(index=range(6), method='bfill')
        expected = SparseDataFrame(data=[[11., 12., 14.],
                                         [11., 12., 14.],
                                         [21., 22., 24.],
                                         [41., 42., 44.],
                                         [41., 42., 44.],
                                         [nan, nan, nan]],
                                   index=range(6),
                                   columns=[1, 2, 4],
                                   dtype=float)
        tm.assert_sp_frame_equal(result, expected)
        # method='ffill': fill from the previous known row
        result = sparse.reindex(index=range(6), method='ffill')
        expected = SparseDataFrame(data=[[nan, nan, nan],
                                         [11., 12., 14.],
                                         [21., 22., 24.],
                                         [21., 22., 24.],
                                         [41., 42., 44.],
                                         [41., 42., 44.]],
                                   index=range(6),
                                   columns=[1, 2, 4],
                                   dtype=float)
        tm.assert_sp_frame_equal(result, expected)
        # Over columns
        # default method
        result = sparse.reindex(columns=range(6))
        expected = SparseDataFrame(data=[[nan, 11., 12., nan, 14., nan],
                                         [nan, 21., 22., nan, 24., nan],
                                         [nan, 41., 42., nan, 44., nan]],
                                   index=[1, 2, 4],
                                   columns=range(6),
                                   dtype=float)
        tm.assert_sp_frame_equal(result, expected)
        # method='bfill'
        with pytest.raises(NotImplementedError):
            sparse.reindex(columns=range(6), method='bfill')
        # method='ffill'
        with pytest.raises(NotImplementedError):
            sparse.reindex(columns=range(6), method='ffill')
    def test_take(self, float_frame):
        """``take`` along columns equals the corresponding column reindex."""
        result = float_frame.take([1, 0, 2], axis=1)
        expected = float_frame.reindex(columns=['B', 'A', 'C'])
        tm.assert_sp_frame_equal(result, expected)
    def test_to_dense(self, float_frame, float_frame_int_kind,
                      float_frame_dense,
                      float_frame_fill0, float_frame_fill0_dense,
                      float_frame_fill2, float_frame_fill2_dense):
        """``to_dense`` yields a frame value-equal to the sparse source and
        to the original dense fixture (dtype differs: Sparse vs plain)."""
        def _check(frame, orig):
            dense_dm = frame.to_dense()
            # Sparse[float] != float
            tm.assert_frame_equal(frame, dense_dm, check_dtype=False)
            tm.assert_frame_equal(dense_dm, orig, check_dtype=False)
        _check(float_frame, float_frame_dense)
        _check(float_frame_int_kind, float_frame_dense)
        _check(float_frame_fill0, float_frame_fill0_dense)
        _check(float_frame_fill2, float_frame_fill2_dense)
    @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
    def test_stack_sparse_frame(self, float_frame, float_frame_int_kind,
                                float_frame_fill0, float_frame_fill2):
        """``spf.stack_sparse_frame`` matches Panel.to_frame stacking for
        NaN-fill frames; non-NaN fill values are not supported yet."""
        def _check(frame):
            dense_frame = frame.to_dense()  # noqa
            wp = Panel.from_dict({'foo': frame})
            from_dense_lp = wp.to_frame()
            from_sparse_lp = spf.stack_sparse_frame(frame)
            tm.assert_numpy_array_equal(from_dense_lp.values,
                                        from_sparse_lp.values)
        _check(float_frame)
        _check(float_frame_int_kind)
        # for now
        pytest.raises(Exception, _check, float_frame_fill0)
        pytest.raises(Exception, _check, float_frame_fill2)
    def test_transpose(self, float_frame, float_frame_int_kind,
                       float_frame_dense,
                       float_frame_fill0, float_frame_fill0_dense,
                       float_frame_fill2, float_frame_fill2_dense):
        """Transposing twice is the identity, and ``.T`` matches the dense
        transpose for every fixture variant."""
        def _check(frame, orig):
            transposed = frame.T
            untransposed = transposed.T
            tm.assert_sp_frame_equal(frame, untransposed)
            tm.assert_frame_equal(frame.T.to_dense(), orig.T)
            tm.assert_frame_equal(frame.T.T.to_dense(), orig.T.T)
            tm.assert_sp_frame_equal(frame, frame.T.T, exact_indices=False)
        _check(float_frame, float_frame_dense)
        _check(float_frame_int_kind, float_frame_dense)
        _check(float_frame_fill0, float_frame_fill0_dense)
        _check(float_frame_fill2, float_frame_fill2_dense)
    def test_shift(self, float_frame, float_frame_int_kind, float_frame_dense,
                   float_frame_fill0, float_frame_fill0_dense,
                   float_frame_fill2, float_frame_fill2_dense):
        """``shift`` (by 0, positive, negative periods, and with a frequency)
        matches dense shift for every fixture variant."""
        def _check(frame, orig):
            shifted = frame.shift(0)
            exp = orig.shift(0)
            tm.assert_frame_equal(shifted.to_dense(), exp)
            shifted = frame.shift(1)
            exp = orig.shift(1)
            tm.assert_frame_equal(shifted.to_dense(), exp)
            shifted = frame.shift(-2)
            exp = orig.shift(-2)
            tm.assert_frame_equal(shifted.to_dense(), exp)
            # freq shift moves the index rather than the data
            shifted = frame.shift(2, freq='B')
            exp = orig.shift(2, freq='B')
            exp = exp.to_sparse(frame.default_fill_value,
                                kind=frame.default_kind)
            tm.assert_frame_equal(shifted, exp)
            shifted = frame.shift(2, freq=BDay())
            exp = orig.shift(2, freq=BDay())
            exp = exp.to_sparse(frame.default_fill_value,
                                kind=frame.default_kind)
            tm.assert_frame_equal(shifted, exp)
        _check(float_frame, float_frame_dense)
        _check(float_frame_int_kind, float_frame_dense)
        _check(float_frame_fill0, float_frame_fill0_dense)
        _check(float_frame_fill2, float_frame_fill2_dense)
    def test_count(self, float_frame):
        """``count`` along either axis matches the dense count."""
        dense_result = float_frame.to_dense().count()
        result = float_frame.count()
        tm.assert_series_equal(result.to_dense(), dense_result)
        result = float_frame.count(axis=None)
        tm.assert_series_equal(result.to_dense(), dense_result)
        result = float_frame.count(axis=0)
        tm.assert_series_equal(result.to_dense(), dense_result)
        result = float_frame.count(axis=1)
        dense_result = float_frame.to_dense().count(axis=1)
        # win32 don't check dtype
        tm.assert_series_equal(result, dense_result, check_dtype=False)
    def test_numpy_transpose(self):
        """``np.transpose`` round-trips a sparse frame; the unsupported
        ``axes`` argument raises ValueError."""
        sdf = SparseDataFrame([1, 2, 3], index=[1, 2, 3], columns=['a'])
        result = np.transpose(np.transpose(sdf))
        tm.assert_sp_frame_equal(result, sdf)
        msg = "the 'axes' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, np.transpose, sdf, axes=1)
    def test_combine_first(self, float_frame):
        """``combine_first`` between two sparse frames matches the dense
        result re-sparsified with the frame's fill value."""
        df = float_frame
        result = df[::2].combine_first(df)
        expected = df[::2].to_dense().combine_first(df.to_dense())
        expected = expected.to_sparse(fill_value=df.default_fill_value)
        tm.assert_sp_frame_equal(result, expected)
@pytest.mark.xfail(reason="No longer supported.", strict=True)
def test_combine_first_with_dense(self):
# We could support this if we allow
# pd.core.dtypes.cast.find_common_type to special case SparseDtype
# but I don't think that's worth it.
df = self.frame
result = df[::2].combine_first(df.to_dense())
expected = df[::2].to_dense().combine_first(df.to_dense())
expected = expected.to_sparse(fill_value=df.default_fill_value)
tm.assert_sp_frame_equal(result, expected)
    def test_combine_add(self, float_frame):
        """``add(..., fill_value=0)`` on sparse frames matches dense add."""
        df = float_frame.to_dense()
        df2 = df.copy()
        # introduce asymmetric NaNs / values so fill_value matters
        df2['C'][:3] = np.nan
        df['A'][:3] = 5.7
        result = df.to_sparse().add(df2.to_sparse(), fill_value=0)
        expected = df.add(df2, fill_value=0).to_sparse()
        tm.assert_sp_frame_equal(result, expected)
    def test_isin(self):
        """Boolean selection via ``isin`` equals an equality-mask selection
        on a zero-filled sparse frame."""
        sparse_df = DataFrame({'flag': [1., 0., 1.]}).to_sparse(fill_value=0.)
        xp = sparse_df[sparse_df.flag == 1.]
        rs = sparse_df[sparse_df.flag.isin([1.])]
        tm.assert_frame_equal(xp, rs)
    def test_sparse_pow_issue(self):
        """``1 ** frame`` with NaNs keeps the same number of stored values
        regardless of how the column is selected."""
        # 2220
        df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
        # note : no error without nan
        df = SparseDataFrame({'A': [nan, 0, 1]})
        # note that 2 ** df works fine, also df ** 1
        result = 1 ** df
        r1 = result.take([0], 1)['A']
        r2 = result['A']
        assert len(r2.sp_values) == len(r1.sp_values)
    def test_as_blocks(self):
        """Deprecated ``.blocks`` warns and groups columns by sparse dtype."""
        df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]},
                             dtype='float64')
        # deprecated 0.21.0
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            df_blocks = df.blocks
        assert list(df_blocks.keys()) == ['Sparse[float64, nan]']
        tm.assert_frame_equal(df_blocks['Sparse[float64, nan]'], df)
    @pytest.mark.xfail(reason='nan column names in _init_dict problematic '
                              '(GH#16894)',
                       strict=True)
    def test_nan_columnname(self):
        """A NaN column label should survive ``to_sparse`` (known bug)."""
        # GH 8822
        nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])
        nan_colname_sparse = nan_colname.to_sparse()
        assert np.isnan(nan_colname_sparse.columns[0])
    def test_isna(self):
        """``isna`` returns a sparse boolean frame, both when the fill value
        is NaN and when it is a concrete value (0)."""
        # GH 8276
        df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],
                                 'B': [0, np.nan, np.nan, 2, np.nan]})
        res = df.isna()
        exp = pd.SparseDataFrame({'A': [True, True, False, False, True],
                                  'B': [False, True, True, False, True]},
                                 default_fill_value=True)
        # force the expected frame's internal fill value to match isna output
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # if fill_value is not nan, True can be included in sp_values
        df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],
                                 'B': [0, np.nan, 0, 2, np.nan]},
                                default_fill_value=0.)
        res = df.isna()
        assert isinstance(res, pd.SparseDataFrame)
        exp = pd.DataFrame({'A': [False, False, False, False, True],
                            'B': [False, True, False, False, True]})
        tm.assert_frame_equal(res.to_dense(), exp)
    def test_notna(self):
        """``notna`` mirrors ``isna``: sparse boolean output for NaN and
        concrete (0) fill values."""
        # GH 8276
        df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],
                                 'B': [0, np.nan, np.nan, 2, np.nan]})
        res = df.notna()
        exp = pd.SparseDataFrame({'A': [False, False, True, True, False],
                                  'B': [True, False, False, True, False]},
                                 default_fill_value=False)
        # force the expected frame's internal fill value to match notna output
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # if fill_value is not nan, True can be included in sp_values
        df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],
                                 'B': [0, np.nan, 0, 2, np.nan]},
                                default_fill_value=0.)
        res = df.notna()
        assert isinstance(res, pd.SparseDataFrame)
        exp = pd.DataFrame({'A': [True, True, True, True, False],
                            'B': [True, False, True, True, False]})
        tm.assert_frame_equal(res.to_dense(), exp)
class TestSparseDataFrameArithmetic(object):
    """Scalar arithmetic and comparison operators on SparseDataFrame."""
    def test_numeric_op_scalar(self):
        """frame + scalar on sparse matches the dense result sparsified."""
        df = pd.DataFrame({'A': [nan, nan, 0, 1, ],
                           'B': [0, 1, 2, nan],
                           'C': [1., 2., 3., 4.],
                           'D': [nan, nan, nan, nan]})
        sparse = df.to_sparse()
        tm.assert_sp_frame_equal(sparse + 1, (df + 1).to_sparse())
    def test_comparison_op_scalar(self):
        """Scalar comparisons stay sparse and match dense comparisons."""
        # GH 13001
        df = pd.DataFrame({'A': [nan, nan, 0, 1, ],
                           'B': [0, 1, 2, nan],
                           'C': [1., 2., 3., 4.],
                           'D': [nan, nan, nan, nan]})
        sparse = df.to_sparse()
        # comparison changes internal repr, compare with dense
        res = sparse > 1
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), df > 1)
        res = sparse != 0
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), df != 0)
class TestSparseDataFrameAnalytics(object):
    """Reductions, cumulative ops, quantiles and assign on SparseDataFrame."""
    def test_cumsum(self, float_frame):
        """``cumsum`` (with any accepted axis spelling) matches dense."""
        expected = SparseDataFrame(float_frame.to_dense().cumsum())
        result = float_frame.cumsum()
        tm.assert_sp_frame_equal(result, expected)
        result = float_frame.cumsum(axis=None)
        tm.assert_sp_frame_equal(result, expected)
        result = float_frame.cumsum(axis=0)
        tm.assert_sp_frame_equal(result, expected)
    def test_numpy_cumsum(self, float_frame):
        """``np.cumsum`` dispatches to the frame; dtype/out kwargs raise."""
        result = np.cumsum(float_frame)
        expected = SparseDataFrame(float_frame.to_dense().cumsum())
        tm.assert_sp_frame_equal(result, expected)
        msg = "the 'dtype' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, np.cumsum,
                               float_frame, dtype=np.int64)
        msg = "the 'out' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, np.cumsum,
                               float_frame, out=result)
    def test_numpy_func_call(self, float_frame):
        """Smoke test: NumPy reductions must accept a sparse frame."""
        # no exception should be raised even though
        # numpy passes in 'axis=None' or `axis=-1'
        funcs = ['sum', 'cumsum', 'var',
                 'mean', 'prod', 'cumprod',
                 'std', 'min', 'max']
        for func in funcs:
            getattr(np, func)(float_frame)
    @pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH 17386)',
                       strict=True)
    def test_quantile(self):
        """Single-quantile result should match dense (known bug, xfail)."""
        # GH 17386
        data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
        q = 0.1
        sparse_df = SparseDataFrame(data)
        result = sparse_df.quantile(q)
        dense_df = DataFrame(data)
        dense_expected = dense_df.quantile(q)
        sparse_expected = SparseSeries(dense_expected)
        tm.assert_series_equal(result, dense_expected)
        tm.assert_sp_series_equal(result, sparse_expected)
    @pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH 17386)',
                       strict=True)
    def test_quantile_multi(self):
        """Multi-quantile result should match dense (known bug, xfail)."""
        # GH 17386
        data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
        q = [0.1, 0.5]
        sparse_df = SparseDataFrame(data)
        result = sparse_df.quantile(q)
        dense_df = DataFrame(data)
        dense_expected = dense_df.quantile(q)
        sparse_expected = SparseDataFrame(dense_expected)
        tm.assert_frame_equal(result, dense_expected)
        tm.assert_sp_frame_equal(result, sparse_expected)
    def test_assign_with_sparse_frame(self):
        """``assign`` on a sparse frame keeps every column sparse."""
        # GH 19163
        df = pd.DataFrame({"a": [1, 2, 3]})
        res = df.to_sparse(fill_value=False).assign(newcol=False)
        exp = df.assign(newcol=False).to_sparse(fill_value=False)
        tm.assert_sp_frame_equal(res, exp)
        for column in res.columns:
            assert type(res[column]) is SparseSeries
# --- (formatting artifact removed: stray table delimiter between concatenated files) ---
# -*- coding: utf-8 -*-
"""
MobileNet-SSD object detection over a saved video file.

Reads frames from the video, runs each frame through a Caffe MobileNet-SSD
model and draws labelled bounding boxes for detections above 20% confidence.
Press 'q' in the output window to stop early.

@author: abhilash
"""
import numpy as np
import cv2

# set of 21 class labels in alphabetical order (background + 20 VOC classes)
class_labels = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle",
                "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
                "horse", "motorbike", "person", "pottedplant", "sheep",
                "sofa", "train", "tvmonitor"]

# FIX: load the pretrained model ONCE, before the frame loop -- the original
# re-read the prototext/caffemodel from disk for every single frame.
mobilenetssd = cv2.dnn.readNetFromCaffe('dataset/mobilenetssd.prototext',
                                        'dataset/mobilenetssd.caffemodel')

# get the saved video file as stream
file_video_stream = cv2.VideoCapture('images/testing/video_sample.mp4')

# FIX: the original looped on `file_video_stream.isOpened` (a bound method,
# always truthy); it must be CALLED to reflect the stream state.
while file_video_stream.isOpened():
    # get the current frame from the video stream
    ret, current_frame = file_video_stream.read()
    # FIX: stop when the stream is exhausted; the original never checked
    # `ret` and crashed dereferencing a None frame at end of video.
    if not ret:
        break

    img_to_detect = current_frame
    img_height = img_to_detect.shape[0]
    img_width = img_to_detect.shape[1]

    # resize to the network input size and convert to a blob;
    # recommended scale factor is 0.007843, blob size 300x300, mean 127.5
    resized_img_to_detect = cv2.resize(img_to_detect, (300, 300))
    img_blob = cv2.dnn.blobFromImage(resized_img_to_detect, 0.007843,
                                     (300, 300), 127.5)

    # pass the preprocessed blob through the model and collect detections
    mobilenetssd.setInput(img_blob)
    obj_detections = mobilenetssd.forward()
    # obj_detections[0, 0, index, 1] => predicted class index,
    # [..., 2] => confidence, [..., 3:7] => bounding box coordinates
    no_of_detections = obj_detections.shape[2]

    # loop over the detections
    for index in np.arange(0, no_of_detections):
        prediction_confidence = obj_detections[0, 0, index, 2]
        # take only predictions with confidence more than 20%
        if prediction_confidence > 0.20:
            predicted_class_index = int(obj_detections[0, 0, index, 1])
            # scale the box back to the original frame size
            bounding_box = obj_detections[0, 0, index, 3:7] * np.array(
                [img_width, img_height, img_width, img_height])
            (start_x_pt, start_y_pt, end_x_pt, end_y_pt) = bounding_box.astype("int")
            predicted_class_label = "{}: {:.2f}%".format(
                class_labels[predicted_class_index], prediction_confidence * 100)
            print("predicted object {}: {}".format(index + 1, predicted_class_label))
            # draw rectangle and text in the image
            cv2.rectangle(img_to_detect, (start_x_pt, start_y_pt),
                          (end_x_pt, end_y_pt), (0, 255, 0), 2)
            cv2.putText(img_to_detect, predicted_class_label,
                        (start_x_pt, start_y_pt - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

    cv2.imshow("Detection Output", img_to_detect)
    # terminate while loop if 'q' key is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# releasing the stream and close all opencv windows
file_video_stream.release()
cv2.destroyAllWindows()
|
# recursive factorial
def factorial(n):
    """Return n! via direct recursion (base case: 0! == 1)."""
    return 1 if n == 0 else n * factorial(n - 1)
# iterative factorial
def factorial(n):
    """Return n! by accumulating a running product."""
    result = 1
    # multiplying by 1 is a no-op, so the product can start at 2
    for factor in range(2, n + 1):
        result *= factor
    return result
# recursive greatest common divisor
def gcd(a, b):
    """Euclid's algorithm, recursive form: gcd(a, 0) == a."""
    return a if b == 0 else gcd(b, a % b)
# iterative greatest common divisor (broken)
def gcd_bad(a, b):
    # BUG (intentional teaching example): ``a`` is overwritten BEFORE the
    # remainder is taken, so ``b = a % b`` computes ``b % b == 0`` and the
    # loop exits after one pass, returning the original ``b`` whenever
    # ``b != 0`` -- not the gcd.
    while (b != 0):
        a = b
        b = a % b
    return a
# iterative greatest common divisor (broken)
def gcd_bad2(a, b):
    # BUG (intentional teaching example): ``a`` is set to the NEW ``b``,
    # so on the next pass ``a % b`` is ``b % b == 0``; both variables
    # collapse to 0 and the function returns 0 for every ``b != 0``.
    while (b != 0):
        b = a % b
        a = b
    return a
# iterative greatest common divisor (correct)
def gcd(a, b):
    """Euclid's algorithm: shift (a, b) -> (b, a mod b) until b is 0."""
    while b != 0:
        remainder = a % b
        a = b
        b = remainder
    return a
# iterative greatest common divisor (Python specific swap)
def gcd2(a, b):
    """Euclid's algorithm using simultaneous assignment instead of a temp."""
    while b:
        a, b = b, a % b
    return a
# recursive fibonacci (inefficient -- exponential blow-up, shown for contrast)
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1)."""
    return n if n < 2 else fib(n - 1) + fib(n - 2)
# iterative fibonacci
def fib(n):
    """Return the n-th Fibonacci number in O(n) time, O(1) space."""
    prev, curr = 0, 1  # first and second Fibonacci numbers
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
# recursive fibonacci (efficient -- linear, accumulator-passing style)
def fib(n):
    """Return the n-th Fibonacci number via a tail-recursive helper."""
    def advance(prev, curr, step):
        # guard clause: once `step` reaches n, `prev` holds fib(n)
        if step >= n:
            return prev
        return advance(curr, prev + curr, step + 1)
    return advance(0, 1, 0)
# iterative factorial (the original comment mislabelled this as recursive)
def fact(n):
    """Return n! by accumulating a running product."""
    product = 1
    for k in range(2, n + 1):  # multiplying by 1 is a no-op
        product *= k
    return product
# recursive factorial (tail recursive)
def fact2(n):
    """Return n! via an accumulator-passing recursive helper."""
    def step(acc, k):
        # guard clause: once k exceeds n the product is complete
        if k > n:
            return acc
        return step(acc * k, k + 1)
    return step(1, 1)
|
"""Tests for the ``utils`` module."""
from datetime import datetime
from aiofacepy import (
get_application_access_token,
get_extended_access_token,
GraphAPI
)
from mock import patch
from nose.tools import (
assert_equal,
assert_raises,
with_setup
)
# Module-level handle to the mocked ``requests`` session's ``request`` method;
# populated by ``mock()`` below and inspected by every test.
mock_request = None
# NOTE(review): this rebinding shadows the imported ``mock.patch`` helper --
# from here on ``patch`` is the patcher INSTANCE for requests.session,
# not the patching function. Works, but easy to trip over.
patch = patch('requests.session')
def mock():
    # Start the patcher; calling the patched session factory yields the mock
    # session whose ``.request`` the tests configure and assert against.
    global mock_request
    mock_request = patch.start()().request
def unmock():
    # Undo the requests.session patch started in ``mock()``.
    patch.stop()
@with_setup(mock, unmock)
def test_get_extended_access_token():
    """Exchanging a token (pre-v2.3 API) returns the extended token and expiry."""
    # Pre-2.3 Graph API responds with a urlencoded body including ``expires``.
    mock_request.return_value.status_code = 200
    mock_request.return_value.content = 'access_token=<extended access token>&expires=5183994'
    access_token, expires_at = get_extended_access_token(
        '<access token>',
        '<application id>',
        '<application secret key>'
    )
    # Verify the exact HTTP call made against the (unversioned) Graph endpoint.
    mock_request.assert_called_with(
        'GET',
        'https://graph.facebook.com/oauth/access_token',
        allow_redirects=True,
        verify=True,
        timeout=None,
        params={
            'client_id': '<application id>',
            'client_secret': '<application secret key>',
            'grant_type': 'fb_exchange_token',
            'fb_exchange_token': '<access token>'
        }
    )
    assert_equal(access_token, '<extended access token>')
    # ``expires`` was present, so a concrete datetime is computed.
    assert isinstance(expires_at, datetime)
@with_setup(mock, unmock)
def test_get_extended_access_token_v23_plus():
    """From API v2.3 the endpoint returns JSON and no expiry field."""
    mock_request.return_value.status_code = 200
    mock_request.return_value.content = (
        '{"access_token":"<extended access token>","token_type":"bearer"}'
    )
    access_token, expires_at = get_extended_access_token(
        '<access token>',
        '<application id>',
        '<application secret key>',
        api_version='2.3'
    )
    # The version segment must appear in the URL when api_version is given.
    mock_request.assert_called_with(
        'GET',
        'https://graph.facebook.com/v2.3/oauth/access_token',
        allow_redirects=True,
        verify=True,
        timeout=None,
        params={
            'client_id': '<application id>',
            'client_secret': '<application secret key>',
            'grant_type': 'fb_exchange_token',
            'fb_exchange_token': '<access token>'
        }
    )
    assert_equal(access_token, '<extended access token>')
    # No ``expires`` in the JSON body -> falsy expiry.
    assert not expires_at
@with_setup(mock, unmock)
def test_get_extended_access_token_no_expiry():
    """A urlencoded response without ``expires`` yields expires_at of None."""
    mock_request.return_value.status_code = 200
    mock_request.return_value.content = 'access_token=<extended access token>'
    access_token, expires_at = get_extended_access_token(
        '<access token>',
        '<application id>',
        '<application secret key>'
    )
    mock_request.assert_called_with(
        'GET',
        'https://graph.facebook.com/oauth/access_token',
        allow_redirects=True,
        verify=True,
        timeout=None,
        params={
            'client_id': '<application id>',
            'client_secret': '<application secret key>',
            'grant_type': 'fb_exchange_token',
            'fb_exchange_token': '<access token>'
        }
    )
    assert_equal(access_token, '<extended access token>')
    assert expires_at is None
@with_setup(mock, unmock)
def test_get_application_access_token():
    """App token request (pre-v2.3) uses client_credentials and returns the token."""
    mock_request.return_value.status_code = 200
    mock_request.return_value.content = 'access_token=<application access token>'
    access_token = get_application_access_token(
        '<application id>',
        '<application secret key>'
    )
    mock_request.assert_called_with(
        'GET',
        'https://graph.facebook.com/oauth/access_token',
        allow_redirects=True,
        verify=True,
        timeout=None,
        params={
            'client_id': '<application id>',
            'client_secret': '<application secret key>',
            'grant_type': 'client_credentials'
        }
    )
    assert_equal(access_token, '<application access token>')
@with_setup(mock, unmock)
def test_get_application_access_token_v23_plus():
    """App token request against a versioned (v2.3+) JSON endpoint."""
    mock_request.return_value.status_code = 200
    mock_request.return_value.content = (
        '{"access_token":"<application access token>","token_type":"bearer"}'
    )
    # NOTE(review): unlike the non-versioned test above, this unpacks TWO
    # values -- presumably the v2.3+ code path returns (token, expiry);
    # confirm against get_application_access_token's implementation.
    access_token, expires_at = get_application_access_token(
        '<application id>',
        '<application secret key>',
        api_version='2.3'
    )
    mock_request.assert_called_with(
        'GET',
        'https://graph.facebook.com/v2.3/oauth/access_token',
        allow_redirects=True,
        verify=True,
        timeout=None,
        params={
            'client_id': '<application id>',
            'client_secret': '<application secret key>',
            'grant_type': 'client_credentials'
        }
    )
    assert_equal(access_token, '<application access token>')
@with_setup(mock, unmock)
def test_get_application_access_token_raises_error():
    """An unparseable response body raises GraphAPI.FacebookError."""
    mock_request.return_value.status_code = 200
    mock_request.return_value.content = 'An unknown error occurred'
    assert_raises(
        GraphAPI.FacebookError,
        get_application_access_token,
        '<application id>',
        '<application secret key>'
    )
|
# -*- coding: utf-8 -*-
"""
All of our extensions are initialized here. They are registered in
app.py:register_extensions upon app creation
"""
# FIX: the ``flask.ext`` import namespace was deprecated in Flask 0.8 and
# removed in Flask 1.0 -- extensions must be imported by their real package
# names (flask_sqlalchemy, flask_migrate, ...). Behavior is otherwise
# unchanged: the same singletons are created for app.py:register_extensions.
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_debugtoolbar import DebugToolbarExtension
from flask_bcrypt import Bcrypt
from flask_admin import Admin
from flask_login import LoginManager
from flask_assets import Environment

# Extension singletons, bound to the app at registration time.
db = SQLAlchemy()
migrate = Migrate()
debug_toolbar = DebugToolbarExtension()
bcrypt = Bcrypt()
admin = Admin(name="I am a Code Ninja!", template_mode='bootstrap3')
login_manager = LoginManager()
assets = Environment()
|
"""
Prediction of Users based on Tweet embeddings.
"""
import pickle
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import BASILICA
def predict_user(user1_name, user2_name, tweet_text, cache=None):
    """Determine and return which user is more likely to say something"""
    # Cache key identifies the (ordered) user pair.
    user_set = pickle.dumps((user1_name, user2_name))
    if cache and cache.exists(user_set):
        # Reuse a previously fitted model for this pair.
        log_reg = pickle.loads(cache.get(user_set))
    else:
        # Look up both users.
        first = User.query.filter(User.name == user1_name).one()
        second = User.query.filter(User.name == user2_name).one()
        # Stack both users' tweet embeddings into one training matrix;
        # label user1's tweets 1 and user2's tweets 0.
        vectors = np.vstack([
            np.array([tweet.embedding for tweet in first.tweets]),
            np.array([tweet.embedding for tweet in second.tweets]),
        ])
        targets = np.concatenate([np.ones(len(first.tweets)),
                                  np.zeros(len(second.tweets))])
        # Fit the LogisticRegression model and remember it when caching.
        log_reg = LogisticRegression().fit(vectors, targets)
        if cache:
            cache.set(user_set, pickle.dumps(log_reg))
    tweet_embedding = BASILICA.embed_sentence(tweet_text, model='twitter')
    return log_reg.predict(np.array(tweet_embedding).reshape(1, -1))
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from django import http
from django.core.urlresolvers import reverse
from django_openstack import api
from django_openstack.tests.view_tests import base
from mox import IsA
class ObjectViewTests(base.BaseViewTests):
    """View tests for the dashboard's Swift object pages (list, upload,
    delete, download, copy, filter); the api layer is stubbed with mox."""

    CONTAINER_NAME = 'containerName'

    def setUp(self):
        super(ObjectViewTests, self).setUp()
        # A single mocked SwiftObject suffices for the list/filter checks.
        swift_object = self.mox.CreateMock(api.SwiftObject)
        self.swift_objects = [swift_object]

    def test_index(self):
        # Listing a container renders the index template with its objects.
        self.mox.StubOutWithMock(api, 'swift_get_objects')
        api.swift_get_objects(
            IsA(http.HttpRequest),
            self.CONTAINER_NAME,
            marker=None).AndReturn(self.swift_objects)
        self.mox.ReplayAll()
        res = self.client.get(reverse('dash_objects',
                                      args=[self.TEST_TENANT,
                                            self.CONTAINER_NAME]))
        self.assertTemplateUsed(res,
                                'django_openstack/dash/objects/index.html')
        self.assertItemsEqual(res.context['objects'], self.swift_objects)
        self.mox.VerifyAll()

    def test_upload_index(self):
        # GET on the upload URL just renders the upload form.
        res = self.client.get(reverse('dash_objects_upload',
                                      args=[self.TEST_TENANT,
                                            self.CONTAINER_NAME]))
        self.assertTemplateUsed(res,
                                'django_openstack/dash/objects/upload.html')

    def test_upload(self):
        # Posting the form streams the file's bytes to swift_upload_object
        # and redirects back to the upload page.
        OBJECT_DATA = 'objectData'
        OBJECT_FILE = tempfile.TemporaryFile()
        OBJECT_FILE.write(OBJECT_DATA)
        OBJECT_FILE.flush()
        OBJECT_FILE.seek(0)
        OBJECT_NAME = 'objectName'
        formData = {'method': 'UploadObject',
                    'container_name': self.CONTAINER_NAME,
                    'name': OBJECT_NAME,
                    'object_file': OBJECT_FILE}
        self.mox.StubOutWithMock(api, 'swift_upload_object')
        api.swift_upload_object(IsA(http.HttpRequest),
                                unicode(self.CONTAINER_NAME),
                                unicode(OBJECT_NAME),
                                OBJECT_DATA)
        self.mox.ReplayAll()
        res = self.client.post(reverse('dash_objects_upload',
                                       args=[self.TEST_TENANT,
                                             self.CONTAINER_NAME]),
                               formData)
        self.assertRedirectsNoFollow(res, reverse('dash_objects_upload',
                                                  args=[self.TEST_TENANT,
                                                        self.CONTAINER_NAME]))
        self.mox.VerifyAll()

    def test_delete(self):
        # Posting DeleteObject calls swift_delete_object and redirects to
        # the container's object index.
        OBJECT_NAME = 'objectName'
        formData = {'method': 'DeleteObject',
                    'container_name': self.CONTAINER_NAME,
                    'object_name': OBJECT_NAME}
        self.mox.StubOutWithMock(api, 'swift_delete_object')
        api.swift_delete_object(
            IsA(http.HttpRequest),
            self.CONTAINER_NAME, OBJECT_NAME)
        self.mox.ReplayAll()
        res = self.client.post(reverse('dash_objects',
                                       args=[self.TEST_TENANT,
                                             self.CONTAINER_NAME]),
                               formData)
        self.assertRedirectsNoFollow(res, reverse('dash_objects',
                                                  args=[self.TEST_TENANT,
                                                        self.CONTAINER_NAME]))
        self.mox.VerifyAll()

    def test_download(self):
        # Downloading returns the raw object bytes with a
        # Content-Disposition header (attachment).
        OBJECT_DATA = 'objectData'
        OBJECT_NAME = 'objectName'
        self.mox.StubOutWithMock(api, 'swift_get_object_data')
        api.swift_get_object_data(IsA(http.HttpRequest),
                                  unicode(self.CONTAINER_NAME),
                                  unicode(OBJECT_NAME)).AndReturn(OBJECT_DATA)
        self.mox.ReplayAll()
        res = self.client.get(reverse('dash_objects_download',
                                      args=[self.TEST_TENANT,
                                            self.CONTAINER_NAME,
                                            OBJECT_NAME]))
        self.assertEqual(res.content, OBJECT_DATA)
        self.assertTrue(res.has_header('Content-Disposition'))
        self.mox.VerifyAll()

    def test_copy_index(self):
        # The copy form needs the list of containers for its destination
        # dropdown, hence the swift_get_containers stub.
        OBJECT_NAME = 'objectName'
        container = self.mox.CreateMock(api.Container)
        container.name = self.CONTAINER_NAME
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        api.swift_get_containers(
            IsA(http.HttpRequest)).AndReturn([container])
        self.mox.ReplayAll()
        res = self.client.get(reverse('dash_object_copy',
                                      args=[self.TEST_TENANT,
                                            self.CONTAINER_NAME,
                                            OBJECT_NAME]))
        self.assertTemplateUsed(res,
                                'django_openstack/dash/objects/copy.html')
        self.mox.VerifyAll()

    def test_copy(self):
        # Posting CopyObject calls swift_copy_object with source and
        # destination names and redirects back to the copy page.
        NEW_CONTAINER_NAME = self.CONTAINER_NAME
        NEW_OBJECT_NAME = 'newObjectName'
        ORIG_CONTAINER_NAME = 'origContainerName'
        ORIG_OBJECT_NAME = 'origObjectName'
        formData = {'method': 'CopyObject',
                    'new_container_name': NEW_CONTAINER_NAME,
                    'new_object_name': NEW_OBJECT_NAME,
                    'orig_container_name': ORIG_CONTAINER_NAME,
                    'orig_object_name': ORIG_OBJECT_NAME}
        container = self.mox.CreateMock(api.Container)
        container.name = self.CONTAINER_NAME
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        api.swift_get_containers(
            IsA(http.HttpRequest)).AndReturn([container])
        self.mox.StubOutWithMock(api, 'swift_copy_object')
        api.swift_copy_object(IsA(http.HttpRequest),
                              ORIG_CONTAINER_NAME,
                              ORIG_OBJECT_NAME,
                              NEW_CONTAINER_NAME,
                              NEW_OBJECT_NAME)
        self.mox.ReplayAll()
        res = self.client.post(reverse('dash_object_copy',
                                       args=[self.TEST_TENANT,
                                             ORIG_CONTAINER_NAME,
                                             ORIG_OBJECT_NAME]),
                               formData)
        self.assertRedirectsNoFollow(res, reverse('dash_object_copy',
                                                  args=[self.TEST_TENANT,
                                                        ORIG_CONTAINER_NAME,
                                                        ORIG_OBJECT_NAME]))
        self.mox.VerifyAll()

    def test_filter(self):
        # Filtering re-queries the object list with the given name prefix
        # and re-renders the index template.
        PREFIX = 'prefix'
        formData = {'method': 'FilterObjects',
                    'container_name': self.CONTAINER_NAME,
                    'object_prefix': PREFIX,
                    }
        self.mox.StubOutWithMock(api, 'swift_get_objects')
        api.swift_get_objects(IsA(http.HttpRequest),
                              unicode(self.CONTAINER_NAME),
                              prefix=unicode(PREFIX)
                              ).AndReturn(self.swift_objects)
        self.mox.ReplayAll()
        res = self.client.post(reverse('dash_objects',
                                       args=[self.TEST_TENANT,
                                             self.CONTAINER_NAME]),
                               formData)
        self.assertTemplateUsed(res,
                                'django_openstack/dash/objects/index.html')
        self.mox.VerifyAll()
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import struct
import six
import numpy as np
import oneflow.core.record.record_pb2 as ofrecord
import cv2
import oneflow as flow
from PIL import Image, ImageOps
import oneflow.typing as tp
def is_image_file(filename):
    """Return True when *filename* ends with a recognised image extension."""
    # str.endswith accepts a tuple, replacing the original any()/generator scan
    return filename.endswith(('.png', '.jpg', '.jpeg', '.PNG', '.JPG', '.JPEG'))
def load_dataset(data_dir, mode, hr_size, lr_size, npy=True):
    """
    Build paired HR/LR image arrays for one dataset split.

    image transform: randomly crop, mirror, normalization(0,1), transpose
    (bs, img_channel, h, w) and shuffle. When ``npy`` is true the shuffled
    lists are saved to ``<data_dir>/<mode>_<size>{hr,lr}_imgs.npy``.
    """
    images_dir = os.path.join(data_dir, mode)
    hr_imgs, lr_imgs = [], []
    for idx, d in enumerate(os.listdir(images_dir)):
        d = os.path.join(images_dir, d)
        print(d)
        if not is_image_file(d):
            # FIX: corrected message typo ("continune" -> "continue")
            print("The file is not an image in:{}, so we continue next one.".format(d))
            continue
        img = Image.open(d)
        # resize to (hr_size+40)^2, then randomly crop to hr_size^2
        r1, r2 = np.random.randint(40, size=2)
        hr_img = img.resize((hr_size + 40, hr_size + 40))
        hr_img = hr_img.crop((r1, r2, r1 + hr_size, r2 + hr_size))
        # downscale the crop; resample=3 is Image.BICUBIC
        lr_img = hr_img.resize((lr_size, lr_size), resample=3)
        if np.random.rand() > 0.5:
            # random mirroring -- applied to BOTH images so the pair stays aligned
            hr_img = ImageOps.mirror(hr_img)
            lr_img = ImageOps.mirror(lr_img)
        # normalize to [0, 1] and move channels first (C, H, W)
        hr_img = np.array(hr_img) / 255.
        lr_img = np.array(lr_img) / 255.
        hr_img = hr_img.transpose(2, 0, 1)
        lr_img = lr_img.transpose(2, 0, 1)
        assert hr_img.shape == (3, hr_size, hr_size), hr_img.shape
        assert lr_img.shape == (3, lr_size, lr_size), lr_img.shape
        hr_imgs.append(hr_img)
        lr_imgs.append(lr_img)
    # shuffle both lists with the same seed so HR/LR pairs stay aligned
    seed = 1024
    np.random.seed(seed)
    np.random.shuffle(hr_imgs)
    np.random.seed(seed)
    np.random.shuffle(lr_imgs)
    if npy:
        hr_imgs_save_path = os.path.join(data_dir, "{}_{}hr_imgs.npy".format(mode, hr_size))
        lr_imgs_save_path = os.path.join(data_dir, "{}_{}lr_imgs.npy".format(mode, lr_size))
        # FIX: None comparisons use ``is not`` (was ``!= None``); note the
        # lists are never None here, so these guards are belt-and-braces only.
        if hr_imgs is not None:
            with open(hr_imgs_save_path, "wb") as f:
                print(hr_imgs_save_path)
                np.save(f, hr_imgs)
                # FIX: removed the redundant f.close() -- the with-block closes f
        if lr_imgs is not None:
            with open(lr_imgs_save_path, "wb") as f:
                print(lr_imgs_save_path)
                np.save(f, lr_imgs)
def load_image(image_path):
    """Read an image as a normalized, channels-first float32 RGB array.

    Returns (array, height, width, output_path) where output_path inserts
    "_result" before the first extension of *image_path*.
    """
    rgb = cv2.imread(image_path)[:, :, ::-1]  # OpenCV loads BGR; flip to RGB
    height, width = rgb.shape[:2]
    chw = (np.array(rgb) / 255.).astype(np.float32).transpose(2, 0, 1)
    chw = np.ascontiguousarray(chw)
    stem, ext = image_path.split(".", 1)
    return chw, height, width, stem + "_result." + ext
if __name__ == "__main__":
    # 88x88 HR crops paired with 22x22 LR inputs (4x super-resolution)
    crop_size = 88
    upscale_factor = 4
    lr_size = crop_size // upscale_factor
    data_dir = "./data"
    for split in ["val", "train"]:
        load_dataset(data_dir, split, crop_size, lr_size)
|
from __future__ import unicode_literals
import base64
import re
import datetime
from moto.core import BaseBackend, BaseModel
from moto.core.exceptions import AWSError
from moto.ec2 import ec2_backends
from moto import settings
from .utils import make_arn_for_certificate
import cryptography.x509
import cryptography.hazmat.primitives.asymmetric.rsa
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.backends import default_backend
from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID
AWS_ROOT_CA = b"""-----BEGIN CERTIFICATE-----
MIIESTCCAzGgAwIBAgITBntQXCplJ7wevi2i0ZmY7bibLDANBgkqhkiG9w0BAQsF
ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
b24gUm9vdCBDQSAxMB4XDTE1MTAyMTIyMjQzNFoXDTQwMTAyMTIyMjQzNFowRjEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEVMBMGA1UECxMMU2VydmVyIENB
IDFCMQ8wDQYDVQQDEwZBbWF6b24wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQDCThZn3c68asg3Wuw6MLAd5tES6BIoSMzoKcG5blPVo+sDORrMd4f2AbnZ
cMzPa43j4wNxhplty6aUKk4T1qe9BOwKFjwK6zmxxLVYo7bHViXsPlJ6qOMpFge5
blDP+18x+B26A0piiQOuPkfyDyeR4xQghfj66Yo19V+emU3nazfvpFA+ROz6WoVm
B5x+F2pV8xeKNR7u6azDdU5YVX1TawprmxRC1+WsAYmz6qP+z8ArDITC2FMVy2fw
0IjKOtEXc/VfmtTFch5+AfGYMGMqqvJ6LcXiAhqG5TI+Dr0RtM88k+8XUBCeQ8IG
KuANaL7TiItKZYxK1MMuTJtV9IblAgMBAAGjggE7MIIBNzASBgNVHRMBAf8ECDAG
AQH/AgEAMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUWaRmBlKge5WSPKOUByeW
dFv5PdAwHwYDVR0jBBgwFoAUhBjMhTTsvAyUlC4IWZzHshBOCggwewYIKwYBBQUH
AQEEbzBtMC8GCCsGAQUFBzABhiNodHRwOi8vb2NzcC5yb290Y2ExLmFtYXpvbnRy
dXN0LmNvbTA6BggrBgEFBQcwAoYuaHR0cDovL2NybC5yb290Y2ExLmFtYXpvbnRy
dXN0LmNvbS9yb290Y2ExLmNlcjA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3Js
LnJvb3RjYTEuYW1hem9udHJ1c3QuY29tL3Jvb3RjYTEuY3JsMBMGA1UdIAQMMAow
CAYGZ4EMAQIBMA0GCSqGSIb3DQEBCwUAA4IBAQAfsaEKwn17DjAbi/Die0etn+PE
gfY/I6s8NLWkxGAOUfW2o+vVowNARRVjaIGdrhAfeWHkZI6q2pI0x/IJYmymmcWa
ZaW/2R7DvQDtxCkFkVaxUeHvENm6IyqVhf6Q5oN12kDSrJozzx7I7tHjhBK7V5Xo
TyS4NU4EhSyzGgj2x6axDd1hHRjblEpJ80LoiXlmUDzputBXyO5mkcrplcVvlIJi
WmKjrDn2zzKxDX5nwvkskpIjYlJcrQu4iCX1/YwZ1yNqF9LryjlilphHCACiHbhI
RnGfN8j8KLDVmWyTYMk8V+6j0LI4+4zFh2upqGMQHL3VFVFWBek6vCDWhB/b
-----END CERTIFICATE-----"""
# Added aws root CA as AWS returns chain you gave it + root CA (provided or not)
# so for now a cheap response is just give any old root CA
def datetime_to_epoch(date):
    """Seconds since the Unix epoch, as an int (Py2 lacks datetime.timestamp())."""
    epoch = datetime.datetime(1970, 1, 1)
    return int((date - epoch).total_seconds())
class AWSValidationException(AWSError):
    """Raised when request input fails ACM validation."""
    TYPE = "ValidationException"
class AWSResourceNotFoundException(AWSError):
    """Raised when a certificate ARN does not exist in the backend."""
    TYPE = "ResourceNotFoundException"
class AWSTooManyTagsException(AWSError):
    """Raised when a tag operation would exceed the per-certificate tag limit."""
    TYPE = "TooManyTagsException"
class TagHolder(dict):
    """Dict of tag key -> value that enforces ACM's tag limits on mutation."""

    MAX_TAG_COUNT = 50
    MAX_KEY_LENGTH = 128
    MAX_VALUE_LENGTH = 256

    def _validate_kv(self, key, value, index):
        """Reject keys/values over the ACM limits or using the aws: prefix."""
        if len(key) > self.MAX_KEY_LENGTH:
            raise AWSValidationException(
                "Value '%s' at 'tags.%d.member.key' failed to satisfy constraint: Member must have length less than or equal to %s"
                % (key, index, self.MAX_KEY_LENGTH)
            )
        if value and len(value) > self.MAX_VALUE_LENGTH:
            raise AWSValidationException(
                "Value '%s' at 'tags.%d.member.value' failed to satisfy constraint: Member must have length less than or equal to %s"
                % (value, index, self.MAX_VALUE_LENGTH)
            )
        if key.startswith("aws:"):
            raise AWSValidationException(
                'Invalid Tag Key: "%s". AWS internal tags cannot be changed with this API'
                % key
            )

    def add(self, tags):
        """Merge *tags* in, validating each and the resulting total count."""
        merged = self.copy()
        for position, tag in enumerate(tags):
            tag_key = tag["Key"]
            tag_value = tag.get("Value", None)
            self._validate_kv(tag_key, tag_value, position + 1)
            merged[tag_key] = tag_value
        if len(merged) > self.MAX_TAG_COUNT:
            raise AWSTooManyTagsException(
                "the TagSet: '{%s}' contains too many Tags"
                % ", ".join(k + "=" + str(v or "") for k, v in merged.items())
            )
        self.update(merged)

    def remove(self, tags):
        """Delete matching tags; when a value is given it must match to delete."""
        for position, tag in enumerate(tags):
            tag_key = tag["Key"]
            tag_value = tag.get("Value", None)
            self._validate_kv(tag_key, tag_value, position + 1)
            # EAFP: a missing key is simply ignored
            try:
                if tag_value is None or self[tag_key] == tag_value:
                    del self[tag_key]
            except KeyError:
                pass

    def equals(self, tags):
        """True when *tags* (ACM list form) denotes exactly this tag set."""
        as_dict = {t["Key"]: t.get("Value", None) for t in tags} if tags else {}
        return self == as_dict
class CertBundle(BaseModel):
    """An ACM certificate: PEM cert + private key + chain, with PEM/validity
    checking, a simulated PENDING_VALIDATION -> ISSUED lifecycle and a
    describe() view matching the ACM API shape."""

    def __init__(
        self,
        certificate,
        private_key,
        chain=None,
        region="us-east-1",
        arn=None,
        cert_type="IMPORTED",
        cert_status="ISSUED",
    ):
        """Store the PEM blobs, append the AWS root CA to the chain and
        validate key, certificate and chain. A provided *arn* overrides the
        generated one."""
        self.created_at = datetime.datetime.now()
        self.cert = certificate
        self._cert = None  # parsed x509 object, set by validate_certificate()
        self.common_name = None
        self.key = private_key
        self._key = None  # parsed private key, set by validate_pk()
        self.chain = chain
        self.tags = TagHolder()
        self._chain = None  # parsed chain certs, set by validate_chain()
        self.type = cert_type  # Should really be an enum
        self.status = cert_status  # Should really be an enum
        self.in_use_by = []

        # AWS always returns your chain + root CA
        if self.chain is None:
            self.chain = AWS_ROOT_CA
        else:
            self.chain += b"\n" + AWS_ROOT_CA

        # Takes care of PEM checking
        self.validate_pk()
        self.validate_certificate()
        if chain is not None:
            self.validate_chain()

        # TODO check cert is valid, or if self-signed then a chain is provided, otherwise
        # raise AWSValidationException('Provided certificate is not a valid self signed. Please provide either a valid self-signed certificate or certificate chain.')

        # Used for when one wants to overwrite an arn
        if arn is None:
            self.arn = make_arn_for_certificate(DEFAULT_ACCOUNT_ID, region)
        else:
            self.arn = arn

    @classmethod
    def generate_cert(cls, domain_name, region, sans=None):
        """Create a self-generated "Amazon-issued" cert for *domain_name*
        (plus optional SANs), valid for one year, in PENDING_VALIDATION."""
        if sans is None:
            sans = set()
        else:
            sans = set(sans)

        # the domain itself is always included in the SAN list
        sans.add(domain_name)
        sans = [cryptography.x509.DNSName(item) for item in sans]

        key = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
            public_exponent=65537, key_size=2048, backend=default_backend()
        )
        subject = cryptography.x509.Name(
            [
                cryptography.x509.NameAttribute(
                    cryptography.x509.NameOID.COUNTRY_NAME, "US"
                ),
                cryptography.x509.NameAttribute(
                    cryptography.x509.NameOID.STATE_OR_PROVINCE_NAME, "CA"
                ),
                cryptography.x509.NameAttribute(
                    cryptography.x509.NameOID.LOCALITY_NAME, "San Francisco"
                ),
                cryptography.x509.NameAttribute(
                    cryptography.x509.NameOID.ORGANIZATION_NAME, "My Company"
                ),
                cryptography.x509.NameAttribute(
                    cryptography.x509.NameOID.COMMON_NAME, domain_name
                ),
            ]
        )
        issuer = cryptography.x509.Name(
            [  # C = US, O = Amazon, OU = Server CA 1B, CN = Amazon
                cryptography.x509.NameAttribute(
                    cryptography.x509.NameOID.COUNTRY_NAME, "US"
                ),
                cryptography.x509.NameAttribute(
                    cryptography.x509.NameOID.ORGANIZATION_NAME, "Amazon"
                ),
                cryptography.x509.NameAttribute(
                    cryptography.x509.NameOID.ORGANIZATIONAL_UNIT_NAME, "Server CA 1B"
                ),
                cryptography.x509.NameAttribute(
                    cryptography.x509.NameOID.COMMON_NAME, "Amazon"
                ),
            ]
        )
        cert = (
            cryptography.x509.CertificateBuilder()
            .subject_name(subject)
            .issuer_name(issuer)
            .public_key(key.public_key())
            .serial_number(cryptography.x509.random_serial_number())
            .not_valid_before(datetime.datetime.utcnow())
            .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=365))
            .add_extension(
                cryptography.x509.SubjectAlternativeName(sans), critical=False
            )
            .sign(key, hashes.SHA512(), default_backend())
        )
        cert_armored = cert.public_bytes(serialization.Encoding.PEM)
        private_key = key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption(),
        )

        return cls(
            cert_armored,
            private_key,
            cert_type="AMAZON_ISSUED",
            cert_status="PENDING_VALIDATION",
            region=region,
        )

    def validate_pk(self):
        """Parse the PEM private key and reject unsupported key sizes."""
        try:
            self._key = serialization.load_pem_private_key(
                self.key, password=None, backend=default_backend()
            )

            if self._key.key_size > 2048:
                # FIX: the exception was previously constructed but never
                # raised, so oversized keys were silently accepted.
                # NOTE(review): the condition only rejects keys > 2048 bits
                # although the message claims only 1024/2048 are allowed.
                raise AWSValidationException(
                    "The private key length is not supported. Only 1024-bit and 2048-bit are allowed."
                )

        except Exception as err:
            if isinstance(err, AWSValidationException):
                raise
            raise AWSValidationException(
                "The private key is not PEM-encoded or is not valid."
            )

    def validate_certificate(self):
        """Parse the PEM certificate, check its validity window and cache
        its common name."""
        try:
            self._cert = cryptography.x509.load_pem_x509_certificate(
                self.cert, default_backend()
            )

            now = datetime.datetime.utcnow()
            if self._cert.not_valid_after < now:
                raise AWSValidationException(
                    "The certificate has expired, is not valid."
                )

            if self._cert.not_valid_before > now:
                raise AWSValidationException(
                    "The certificate is not in effect yet, is not valid."
                )

            # Extracting some common fields for ease of use
            # Have to search through cert.subject for OIDs
            self.common_name = self._cert.subject.get_attributes_for_oid(
                cryptography.x509.OID_COMMON_NAME
            )[0].value
        except Exception as err:
            if isinstance(err, AWSValidationException):
                raise
            raise AWSValidationException(
                "The certificate is not PEM-encoded or is not valid."
            )

    def validate_chain(self):
        """Parse every certificate in the chain and check its validity window."""
        try:
            self._chain = []
            for cert_armored in self.chain.split(b"-\n-"):
                # Would leave encoded but Py2 does not have raw binary strings
                cert_armored = cert_armored.decode()

                # Fix missing -'s on split
                cert_armored = re.sub(r"^----B", "-----B", cert_armored)
                cert_armored = re.sub(r"E----$", "E-----", cert_armored)
                cert = cryptography.x509.load_pem_x509_certificate(
                    cert_armored.encode(), default_backend()
                )
                self._chain.append(cert)

                now = datetime.datetime.now()
                # FIX: validate the chain certificate just parsed; the
                # original re-checked self._cert (the leaf) on every pass,
                # so expired chain certs were never detected.
                if cert.not_valid_after < now:
                    raise AWSValidationException(
                        "The certificate chain has expired, is not valid."
                    )
                if cert.not_valid_before > now:
                    raise AWSValidationException(
                        "The certificate chain is not in effect yet, is not valid."
                    )

        except Exception as err:
            if isinstance(err, AWSValidationException):
                raise
            raise AWSValidationException(
                "The certificate is not PEM-encoded or is not valid."
            )

    def check(self):
        # Basically, if the certificate is pending, and then checked again after a
        # while, it will appear as if its been validated. The default wait time is 60
        # seconds but you can set an environment to change it.
        waited_seconds = (datetime.datetime.now() - self.created_at).total_seconds()
        if (
            self.type == "AMAZON_ISSUED"
            and self.status == "PENDING_VALIDATION"
            and waited_seconds > settings.ACM_VALIDATION_WAIT
        ):
            self.status = "ISSUED"

    def describe(self):
        """Return the ACM DescribeCertificate response dict for this bundle."""
        # 'RenewalSummary': {}, # Only when cert is amazon issued
        if self._key.key_size == 1024:
            key_algo = "RSA_1024"
        elif self._key.key_size == 2048:
            key_algo = "RSA_2048"
        else:
            key_algo = "EC_prime256v1"

        # Look for SANs
        try:
            san_obj = self._cert.extensions.get_extension_for_oid(
                cryptography.x509.OID_SUBJECT_ALTERNATIVE_NAME
            )
        except cryptography.x509.ExtensionNotFound:
            san_obj = None
        sans = []
        if san_obj is not None:
            sans = [item.value for item in san_obj.value]

        result = {
            "Certificate": {
                "CertificateArn": self.arn,
                "DomainName": self.common_name,
                "InUseBy": self.in_use_by,
                "Issuer": self._cert.issuer.get_attributes_for_oid(
                    cryptography.x509.OID_COMMON_NAME
                )[0].value,
                "KeyAlgorithm": key_algo,
                "NotAfter": datetime_to_epoch(self._cert.not_valid_after),
                "NotBefore": datetime_to_epoch(self._cert.not_valid_before),
                "Serial": self._cert.serial_number,
                "SignatureAlgorithm": self._cert.signature_algorithm_oid._name.upper().replace(
                    "ENCRYPTION", ""
                ),
                "Status": self.status,  # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED.
                "Subject": "CN={0}".format(self.common_name),
                "SubjectAlternativeNames": sans,
                "Type": self.type,  # One of IMPORTED, AMAZON_ISSUED
            }
        }

        if self.type == "IMPORTED":
            result["Certificate"]["ImportedAt"] = datetime_to_epoch(self.created_at)
        else:
            result["Certificate"]["CreatedAt"] = datetime_to_epoch(self.created_at)
            result["Certificate"]["IssuedAt"] = datetime_to_epoch(self.created_at)
        return result

    def serialize_pk(self, passphrase_bytes):
        """Return the private key PEM-encrypted under *passphrase_bytes*."""
        pk_bytes = self._key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.BestAvailableEncryption(
                passphrase_bytes
            ),
        )
        return pk_bytes.decode("utf-8")

    def __str__(self):
        return self.arn

    def __repr__(self):
        return "<Certificate>"
class AWSCertificateManagerBackend(BaseBackend):
def __init__(self, region):
    """Per-region in-memory store of certificates and idempotency tokens."""
    super(AWSCertificateManagerBackend, self).__init__()
    self.region = region
    # arn -> CertBundle
    self._certificates = {}
    # idempotency token -> {"arn": ..., "expires": datetime}
    self._idempotency_tokens = {}
def reset(self):
    """Wipe all backend state but keep the region (moto test-reset hook)."""
    region = self.region
    # drop every attribute, then re-run __init__ with the preserved region
    self.__dict__ = {}
    self.__init__(region)
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
    """Default VPC endpoint service."""
    # delegates to the shared factory with the acm-pca service name
    return BaseBackend.default_vpc_endpoint_service_factory(
        service_region, zones, "acm-pca"
    )
@staticmethod
def _arn_not_found(arn):
    """Build (not raise) the standard not-found exception for *arn*."""
    msg = "Certificate with arn {0} not found in account {1}".format(
        arn, DEFAULT_ACCOUNT_ID
    )
    return AWSResourceNotFoundException(msg)
def set_certificate_in_use_by(self, arn, load_balancer_name):
    """Record that *load_balancer_name* references the certificate at *arn*."""
    if arn not in self._certificates:
        raise self._arn_not_found(arn)

    cert_bundle = self._certificates[arn]
    cert_bundle.in_use_by.append(load_balancer_name)
def _get_arn_from_idempotency_token(self, token):
    """
    If token doesnt exist, return None, later it will be
    set with an expiry and arn.

    If token expiry has passed, delete entry and return None

    Else return ARN

    :param token: String token
    :return: None or ARN
    """
    now = datetime.datetime.now()
    if token in self._idempotency_tokens:
        if self._idempotency_tokens[token]["expires"] < now:
            # Token has expired, new request
            del self._idempotency_tokens[token]
            return None
        else:
            return self._idempotency_tokens[token]["arn"]

    return None
def _set_idempotency_token_arn(self, token, arn):
    """Remember *arn* for *token*; entries expire after one hour."""
    self._idempotency_tokens[token] = {
        "arn": arn,
        "expires": datetime.datetime.now() + datetime.timedelta(hours=1),
    }
def import_cert(self, certificate, private_key, chain=None, arn=None, tags=None):
if arn is not None:
if arn not in self._certificates:
raise self._arn_not_found(arn)
else:
# Will reuse provided ARN
bundle = CertBundle(
certificate, private_key, chain=chain, region=self.region, arn=arn
)
else:
# Will generate a random ARN
bundle = CertBundle(
certificate, private_key, chain=chain, region=self.region
)
self._certificates[bundle.arn] = bundle
if tags:
self.add_tags_to_certificate(bundle.arn, tags)
return bundle.arn
def get_certificates_list(self, statuses):
"""
Get list of certificates
:return: List of certificates
:rtype: list of CertBundle
"""
for arn in self._certificates.keys():
cert = self.get_certificate(arn)
if not statuses or cert.status in statuses:
yield cert
def get_certificate(self, arn):
if arn not in self._certificates:
raise self._arn_not_found(arn)
cert_bundle = self._certificates[arn]
cert_bundle.check()
return cert_bundle
def delete_certificate(self, arn):
if arn not in self._certificates:
raise self._arn_not_found(arn)
del self._certificates[arn]
def request_certificate(
self,
domain_name,
domain_validation_options,
idempotency_token,
subject_alt_names,
tags=None,
):
if idempotency_token is not None:
arn = self._get_arn_from_idempotency_token(idempotency_token)
if arn and self._certificates[arn].tags.equals(tags):
return arn
cert = CertBundle.generate_cert(
domain_name, region=self.region, sans=subject_alt_names
)
if idempotency_token is not None:
self._set_idempotency_token_arn(idempotency_token, cert.arn)
self._certificates[cert.arn] = cert
if tags:
cert.tags.add(tags)
return cert.arn
def add_tags_to_certificate(self, arn, tags):
# get_cert does arn check
cert_bundle = self.get_certificate(arn)
cert_bundle.tags.add(tags)
def remove_tags_from_certificate(self, arn, tags):
# get_cert does arn check
cert_bundle = self.get_certificate(arn)
cert_bundle.tags.remove(tags)
def export_certificate(self, certificate_arn, passphrase):
passphrase_bytes = base64.standard_b64decode(passphrase)
cert_bundle = self.get_certificate(certificate_arn)
certificate = cert_bundle.cert.decode()
certificate_chain = cert_bundle.chain.decode()
private_key = cert_bundle.serialize_pk(passphrase_bytes)
return certificate, certificate_chain, private_key
# One ACM backend per region, mirroring the set of EC2 regions.
acm_backends = {
    region: AWSCertificateManagerBackend(region) for region in ec2_backends
}
|
'''
These transforms fix known bugs in the egosoft MD scripts.
'''
import xml.etree.ElementTree as ET
from .Support import XML_Find_Match, XML_Find_All_Matches, Make_Director_Shell
from . import Support # TODO: move xml functions to another module.
from ... import File_Manager
'''
TODO:
New Home (tc plots for ap)
- The final gate connection is not completed; only affects the base
version of this mod; rereleases tend to fix the bug.
- Low priority, since fixes exist and this is just a mod.
Defend station mission capping
- Ships capped as part of Defend Station still are considered attackers,
so the mission doesn't end.
- If cancelling the mission, the capped ship is deleted.
- https://forum.egosoft.com/viewtopic.php?f=94&t=381503&start=6375
'''
# TODO: convenience transform that runs all fixes, likely including these
# and some in other modules.
@File_Manager.Transform_Wrapper('director/2.024 Player Corp.xml',
                                FL = False)
def Fix_Corporation_Troubles_Balance_Rollover():
    '''
    In the Corporation Troubles plot, prevents the bank balance from
    reaching >2 billion and rolling over due to the 32-bit signed
    integer limit.
    '''
    '''
    Notes:
    Related thread with the complaint:
    https://forum.egosoft.com/viewtopic.php?f=2&t=401564
    Corperate bank balance accumulates every 60 minutes from
    income/outgoing, and can eventually reach 2 billion and overflow.
    A simple fix would be a limit check at the accumulation point, preventing
    any change once reaching a safe cap (with some slack for adjustments
    from random missions, though those mostly subtract and appear to only
    add up to ~10 million or so when selling fake assets).
    '''
    # Value to limit the balance to.
    # Go for something easy and obvious, with headroom.
    # 2 billion is probably safe (100+ million headroom), but there is a
    # 'loan' step that can add 200 million to the balance (maybe done once
    # at start, unclear), so just limit to 1 billion for now.
    max_balance = 1000000000
    # Lay out the format of the original node, for matching.
    match_node = ET.fromstring(
        '''
<do_all>
<set_value name="L2M024.CorpBankBalance" exact="{value@L2M024.CorpIncome}" operation="add"/>
<set_value name="L2M024.CorpBankBalance" exact="{value@L2M024.CorpOutgoing}" operation="subtract"/>
<set_value name="L2M024.AccountTime" exact="{player.age}+3600"/>
<reset_cue cue="L2M024 Income Update"/>
</do_all>
''')
    # Grab the director file.
    file_contents = File_Manager.Load_File('director/2.024 Player Corp.xml')
    tree = file_contents.Get_XML_Tree()
    ## Find the insertion point.
    ## It is a bit clumsy to try to pick this directly out of the xml tree
    ## structure, so instead look for the housing cue node:
    ## <cue name="L2M024 Income Update" ...>
    ## Searching syntax is a little weird. In short:
    ## '.': root node.
    ## '//': recursive search of all children.
    ## 'cue': look for 'cue' nodes.
    ## []: attributes values to match.
    #found_nodes = tree.findall(".//cue[@name='L2M024 Income Update']")
    ## Should have a single match.
    #assert len(found_nodes) == 1
    # Find the original node using the support match function.
    orig_node = XML_Find_Match(tree, match_node)
    # Make the new node to be inserted, a do_if.
    # This will write the text and parse it; could also manually make
    # the elements. TODO: try both ways and see what is cleaner and
    # more manageable.
    insert_node = ET.fromstring(''.join([
        # Check for over limit.
        '<do_if value="{value@L2M024.CorpBankBalance}"'+' min="{}">'.format(max_balance),
        # Set to limit.
        ' <set_value name="L2M024.CorpBankBalance"'+' exact="{}"/>'.format(max_balance),
        '</do_if>',
        ]))
    # Append the clamp inside the matched do_all, so it runs after the
    # income/outgoing adjustments on each update cycle.
    orig_node.append(insert_node)
    # Push the changed xml tree to the file tracker.
    # Reformat to get proper indents on new nodes.
    file_contents.Update_From_XML_Node(tree, reformat = True)
    return
@File_Manager.Transform_Wrapper('director/2.004 Terran Plot Scene 4.xml',
                                FL = False)
def Fix_Terran_Plot_Aimless_TPs():
    '''
    In the Terran Conflict plot when allied TPs move to capture an Elephant,
    fix replacement TPs to move toward the Elephant instead of wandering
    aimlessly.
    '''
    # Background (see https://forum.egosoft.com/viewtopic.php?f=93&t=397816):
    # in <cue name="L2M004 Protect Boarding Crews"> the initial TP escorts
    # are given command="follow" toward L2M004.SplitTL, but the replacement
    # TPs instead get command="moveposition" with only a commandobject.
    # Per the obj code around L00195A3E, "moveposition" uses commandobject
    # solely to pick a sector when "sector" is absent, and reads its target
    # via CUE.ReadPosition(position, sector) -- so with no "position" the
    # replacements wander off. Swapping the command to "follow" matches
    # the behavior of the original escorts.
    #
    # Template describing the single faulty node to locate.
    faulty_command = ET.fromstring(
        '<command command="moveposition" commandobject="L2M004.SplitTL"/>')
    # Load and parse the director script.
    game_file = File_Manager.Load_File('director/2.004 Terran Plot Scene 4.xml')
    xml_root = game_file.Get_XML_Tree()
    # Locate the faulty node and flip its command attribute.
    node = XML_Find_Match(xml_root, faulty_command)
    node.set('command', 'follow')
    # Hand the edited tree back, reformatting for clean indentation.
    game_file.Update_From_XML_Node(xml_root, reformat = True)
    return
# Note: haven't checked FL changes; the bug may still exist (hopefully not).
@File_Manager.Transform_Wrapper('director/0.83 Dual Convoy.xml',
                                'director/2.183 Dual Convoy.xml',
                                FL = False)
def Fix_Dual_Convoy_Invincible_Stations():
    '''
    Fixes Dual Convoy generic missions to no longer leave stations
    permanently invincible, and to no longer risk clearing invincibility
    from plot stations, as well as fixes a minor bug in the parameter list.
    Stations used by these missions will no longer be set invincible,
    and a mission is cancelled if an end point station is destroyed.
    Does not affect any invincibility flags already set in an existing save.
    Consider also using Fix_Reset_Invincible_Stations to clear leftover
    flags in an existing save.
    '''
    '''
    Notes:
    Related thread with the complaint:
    https://forum.egosoft.com/viewtopic.php?f=2&t=400240
    Problem 1:
    This node exists in the setup part of the mission, prior to creating
    a mission briefing.
    <set_group_invincible group="L2M183.StationGroup" invincible="1"/>
    The "L2M183 End" cue has a matching command to clear the flag, but
    this can only ever be reached after the active part of the mission
    is started by the player accepting it.
    Without acceptance, the mission will either time out, or get removed
    due to the player traversing several gates.
    It looks like a third situation is when a start/end station gets
    destroyed somehow after being set invincible; that case can also
    be fixed for extra safety.
    Problem 2:
    In addition to leaving stations invuln, this script may also end
    up clearing the invuln flag on plot critical stations, if it
    selects one as a start/end point, potentially breaking those plots
    in a way they don't expect.
    Problem 3 (minor; noticed along the way):
    In "0.83 Dual Convoy" the parameter lists have "StartStation1" twice
    instead of "StartStation2".
    It is unclear on what effects this bug might have; maybe none since
    the start stations are not used in the script anywhere, but it might
    be good form to fix this.
    Two possible fix approaches:
    1) Aim to preserve the invincibility on mission stations.
    - Add a set_group_invincible node to each place the mission can get
    cancelled, clearing invincibility.
    - Also, to protect plot stations, change the station selection code
    to cancel this generic mission early (before invuln gets set) if
    any of the selected stations are already set to invuln.
    - Note: this is imperfect, as a selected station may also get used
    for a plot after this generic mission is activated, leading to
    accidental clearing of its plot-set invuln flag.
    2) Remove invincibility entirely.
    - Remove the invuln setting and clearing commands.
    - Stations already invuln in an existing save can be addressed by
    some other method that would be needed anyway to clean out
    stale flags.
    - Ensure the mission will exit nicely if a station gets destroyed.
    Go with option (2), since (1) still leaves bugs in place.
    How to clean up missions when stations are destroyed?
    - Existing conditions for ending the mission aren't really set up to
    capture station destruction.
    - So, set up a new cue just for this case, triggering if either end
    station gets destroyed (start stations can be ignored).
    - Action logic can borrow from the other cues to trigger cleanup,
    set a return code, etc.
    Test results:
    Quick test destroying an end point station did cancel out the
    mission, albeit with a message saying the convoy was destroyed.
    Any further robustness tests are pending.
    '''
    # Get the input files of interest.
    # For lack of a better name prefix, these are 'low' and 'high' for
    # the level of their logic.
    low_file_contents = File_Manager.Load_File('director/0.83 Dual Convoy.xml')
    low_tree = low_file_contents.Get_XML_Tree()
    high_file_contents = File_Manager.Load_File('director/2.183 Dual Convoy.xml')
    high_tree = high_file_contents.Get_XML_Tree()

    # Simple parameter name fix (problem 3): the second start-station
    # parameter was mislabeled "StartStation1".
    match_node = ET.fromstring(
        '<param name="StartStation1" type="objectname" compulsory="1"'
        ' description="Start Station 2 for the mission"/>')
    found_nodes = XML_Find_All_Matches(low_tree, match_node)
    # There should be two matches.
    assert len(found_nodes) == 2
    # Adjust the name.
    for node in found_nodes:
        node.set('name','StartStation2')

    # Add the new abort-like cue.
    # Create a new node to insert after the abort.
    # Progress will be set to 2, failure, so that the higher level will
    # cancel the objective. (The abort progress code, 99, doesn't have
    # any higher level handler.)
    # To be safe, also borrow some cue cancelling commands from the
    # 'convoy destroyed' cue.
    insert_node = ET.fromstring(
        '''
<cue name="L0M83 Station Destroyed">
<condition>
<check_value value="{object.exists@{param@Cue}.{param@ID} L0M83EndStation1}*{object.exists@{param@Cue}.{param@ID} L0M83EndStation2}" exact="0"/>
</condition>
<action>
<do_all>
<set_value name="{param@Cue}.{param@ID} L0M83 Progress" exact="2"/>
<set_value name="L0M83.CleanUp" exact="1"/>
<cancel_cue cue="L0M83 Convoy 1 Finished"/>
<cancel_cue cue="L0M83 Convoy 2 Finished"/>
<cancel_cue cue="L0M83 Start Enemies"/>
</do_all>
</action>
</cue>
'''
        )
    # Make sure the new cue name is not already present, in case this
    # transform was already run before.
    # Fixed: compare against None with 'is', per PEP 8; Elements define
    # nonstandard truthiness, so an identity check states the intent
    # unambiguously.
    assert low_tree.find('.//cue[@name="L0M83 Station Destroyed"]') is None
    # Insert after the abort.
    # Find the original abort node.
    abort_node = low_tree.find('.//cue[@name="L0M83 Aborted"]')
    # Find its parent as well.
    parent_node = low_tree.find('.//cue[@name="L0M83 Aborted"]/..')
    # Find the index of the node in the child list of its parent.
    #abort_index = list(parent_node).index(abort_node)
    #parent_node.insert(abort_index + 1, insert_node)
    Support.XML_Insert_After(parent_node, abort_node, insert_node)

    # Match children to parents (ElementTree has no parent pointers).
    child_parent_dict = {child : parent
                         for parent in high_tree.iter()
                         for child in parent}
    # Remove the invincibility settings from the top level script.
    # Loop over the set_group_invincible nodes.
    for node in high_tree.findall('.//set_group_invincible'):
        # Remove from the parent.
        child_parent_dict[node].remove(node)

    # Update the xml in both files.
    # By doing this at the end, any errors above will prevent any partial
    # changes from being completed.
    low_file_contents.Update_From_XML_Node(low_tree, reformat = True)
    high_file_contents.Update_From_XML_Node(high_tree, reformat = True)
    return
@File_Manager.Transform_Wrapper(FL = False)
def Fix_Reset_Invincible_Stations(cue_index = 0):
    '''
    Resets the invincibility flag on stations in an existing save.
    Works by re-triggering the matching script contained in an AP patch,
    which will preserve invincibility for AP plot related stations.
    Warnings: invincibility flags from other sources (eg. TC plots for AP)
    may be lost.
    Pending test and verification.

    * cue_index
      - Int, index for the director cue which will retrigger the reset
        call. Increment this if wanting to run the reset script again for
        an existing save, as each cue name will fire only once.
      - Default is 0.
    '''
    # Each generated director cue needs a unique name so the game will
    # fire it again; suffix the base name with the caller-chosen index.
    base_name = 'X3_Customizer_Reset_Invincible_Stations'
    Make_Director_Shell(
        cue_name = '{}_{}'.format(base_name, cue_index),
        body_text = '<reset_cue cue="Reset_Invincible_Stations"/>',
        file_name = base_name + '.xml')
    # TODO: patch the existing script to be more robust for stations
    # it might miss. Eg., at a glance, it doesn't include "L2M023.A2 CKPS"
    # for Shady Business.
    return
@File_Manager.Transform_Wrapper('director/2.023 Shady Business.xml',
                                FL = False)
def _Fix_Shady_Business_Captured_Ship_Despawn():
    '''
    In the Shady Business plot, at the end, various spawned ship groups
    will be destroyed without considering if the player captured any
    of them. This adds in player ownership checks before destruction
    is allowed.
    In development.
    '''
    '''
    Notes:
    Related thread with the complaint:
    https://forum.egosoft.com/viewtopic.php?f=2&t=401504
    Problem:
    Problem code is in cue "L2M023.Cleanup", which destroys several
    groups of ships without any checks on ownership.
    In addition to the complained about case, the destruction commands
    occur many other places in the script.
    Fix:
    For each single-ship destruction ("destroy_object") node, can
    bury it underneath a do_if that checks ownership first.
    For the group destruction ("destroy_group") nodes, a more complex
    replacement is needed which will loop over the group on a per-ship
    basis, check ownership and destroy.
    To capture all cases, the script will need to be searched and have
    these checks placed at every instance of these commands.
    TODO:
    Should this edit be made to all scripts blindly? Or is player
    ship destruction wanted in some cases?
    Could the obj code be edited in some way to achieve this effect
    more naturally?
    '''
    # Put the script name here, in possible prep for making this transform
    # work on any script.
    script_name = 'director/2.023 Shady Business.xml'
    # Grab the director file.
    file_contents = File_Manager.Load_File(script_name)
    tree = file_contents.Get_XML_Tree()
    # Find all destroy_object nodes.
    destroy_object_nodes = tree.findall('.//destroy_object')
    # Find all destroy_group nodes.
    destroy_group_nodes = tree.findall('.//destroy_group')
    # TODO: edit the nodes.
    # NOTE(review): the node lists above are collected but not yet edited;
    # as written, this transform rewrites the file without changing
    # behavior (hence the leading underscore / in-development status).
    # Push the changed xml tree to the file tracker.
    # Reformat to get proper indents on new nodes.
    file_contents.Update_From_XML_Node(tree, reformat = True)
    return
|
from maingui import main

# Guard the entry point so importing this module does not launch the GUI;
# main() only runs when the file is executed directly.
if __name__ == "__main__":
    main()
|
from django.contrib.auth.views import LogoutView
from django.urls import path

from . import views
from .views import CustomLoginView, RegisterPage

# Route table: auth views first, then content pages; the slug catch-all
# stays last so the literal prefixes above take precedence.
urlpatterns = [
    path('login/', CustomLoginView.as_view(), name='login'),
    path('logout/', LogoutView.as_view(next_page='login'), name='logout'),
    path('register/', RegisterPage.as_view(), name='register'),
    path('', views.home, name='home'),
    path('blog/', views.frontpage, name='blog'),
    path('<slug:slug>/', views.post_detail, name='post_detail'),
]
|
import re
import typing
import pytest
from dagster import (
Any,
DagsterInvalidConfigDefinitionError,
DagsterInvalidConfigError,
DagsterInvalidDefinitionError,
Field,
Float,
Int,
List,
ModeDefinition,
Noneable,
Permissive,
PipelineDefinition,
ResourceDefinition,
Set,
String,
Tuple,
composite_solid,
execute_pipeline,
execute_solid,
pipeline,
solid,
)
from dagster.config.errors import DagsterEvaluationErrorReason
from dagster.config.field_utils import convert_potential_field
from dagster.config.validate import process_config, validate_config
def test_noop_config():
    # A bare Any field is constructible and truthy.
    assert Field(Any)


def test_int_field():
    # Int fields validate and pass integer values through unchanged.
    config_field = convert_potential_field({"int_field": Int})
    assert validate_config(config_field.config_type, {"int_field": 1}).value == {"int_field": 1}


def test_float_field():
    # Float fields accept floats from both validate and process paths.
    config_field = convert_potential_field({"float_field": Float})
    assert validate_config(config_field.config_type, {"float_field": 1.0}).value == {
        "float_field": 1.0
    }
    assert process_config(config_field.config_type, {"float_field": 1.0}).value == {
        "float_field": 1.0
    }
    # Ints are accepted for Float fields; validate leaves the int as-is,
    # while process coerces it to a float.
    assert validate_config(config_field.config_type, {"float_field": 1}).value == {"float_field": 1}
    assert process_config(config_field.config_type, {"float_field": 1}).value == {
        "float_field": 1.0
    }


def assert_config_value_success(config_type, config_value, expected):
    """Assert that processing config_value against config_type yields expected."""
    result = process_config(config_type, config_value)
    assert result.success
    assert result.value == expected


def assert_eval_failure(config_type, value):
    """Assert that validating value against config_type fails."""
    assert not validate_config(config_type, value).success


def test_int_fails():
    # Strings and booleans are both rejected for Int fields.
    config_field = convert_potential_field({"int_field": Int})
    assert_eval_failure(config_field.config_type, {"int_field": "fjkdj"})
    assert_eval_failure(config_field.config_type, {"int_field": True})
def test_default_arg():
    # An optional Int field's default is filled in when the key is omitted.
    config_field = convert_potential_field(
        {"int_field": Field(Int, default_value=2, is_required=False)}
    )
    assert_config_value_success(config_field.config_type, {}, {"int_field": 2})


def test_default_float_arg():
    # Float defaults work whether declared as a float or as an int.
    config_field = convert_potential_field(
        {"float_field": Field(Float, default_value=2.0, is_required=False)}
    )
    assert_config_value_success(config_field.config_type, {}, {"float_field": 2.0})
    config_field = convert_potential_field(
        {"float_field": Field(Float, default_value=2, is_required=False)}
    )
    assert_config_value_success(config_field.config_type, {}, {"float_field": 2})
def _single_required_string_config_dict():
    """Schema fixture: one required String field."""
    return convert_potential_field({"string_field": String})


def _multiple_required_fields_config_dict():
    """Schema fixture: two required String fields."""
    return convert_potential_field({"field_one": String, "field_two": String})


def _single_optional_string_config_dict():
    """Schema fixture: one optional String field with no default."""
    return convert_potential_field({"optional_field": Field(String, is_required=False)})


def _single_optional_string_field_config_dict_with_default():
    """Schema fixture: one optional String field with a default value."""
    optional_field_def = Field(String, is_required=False, default_value="some_default")
    return convert_potential_field({"optional_field": optional_field_def})


def _mixed_required_optional_string_config_dict_with_default():
    """Schema fixture: a mix of required, optional-with-default, and optional fields."""
    return convert_potential_field(
        {
            "optional_arg": Field(String, is_required=False, default_value="some_default"),
            "required_arg": Field(String, is_required=True),
            "optional_arg_no_default": Field(String, is_required=False),
        }
    )


def _multiple_required_fields_config_permissive_dict():
    """Schema fixture: two required String fields inside a Permissive dict."""
    return Field(Permissive({"field_one": Field(String), "field_two": Field(String)}))


def _validate(config_field, value):
    """Process value against config_field, asserting success; return the result value."""
    res = process_config(config_field.config_type, value)
    assert res.success, res.errors[0].message
    return res.value
def test_single_required_string_field_config_type():
    # Valid value passes through; missing, extra, and mistyped entries fail.
    assert _validate(_single_required_string_config_dict(), {"string_field": "value"}) == {
        "string_field": "value"
    }
    with pytest.raises(
        AssertionError, match='Missing required config entry "string_field" at the root.',
    ):
        _validate(_single_required_string_config_dict(), {})
    with pytest.raises(AssertionError):
        _validate(_single_required_string_config_dict(), {"extra": "yup"})
    with pytest.raises(AssertionError):
        _validate(_single_required_string_config_dict(), {"string_field": "yupup", "extra": "yup"})
    with pytest.raises(AssertionError):
        _validate(_single_required_string_config_dict(), {"string_field": 1})


def test_undefined_field_error():
    # Unexpected keys report the expected schema in the error message.
    with pytest.raises(
        AssertionError,
        match=(
            'Received unexpected config entry "extra" at the root. Expected: "{ string_field: '
            'String }".'
        ),
    ):
        _validate(
            _single_required_string_config_dict(), {"string_field": "value", "extra": "extra"}
        )


def test_multiple_required_fields_passing():
    # Both required fields supplied -> values pass through.
    assert _validate(
        _multiple_required_fields_config_dict(),
        {"field_one": "value_one", "field_two": "value_two"},
    ) == {"field_one": "value_one", "field_two": "value_two"}


def test_multiple_required_fields_failing():
    # Missing fields, extra fields, and wrong types all fail validation.
    with pytest.raises(AssertionError):
        _validate(_multiple_required_fields_config_dict(), {})
    with pytest.raises(AssertionError):
        _validate(_multiple_required_fields_config_dict(), {"field_one": "yup"})
    with pytest.raises(AssertionError):
        _validate(_multiple_required_fields_config_dict(), {"field_one": "yup", "extra": "yup"})
    with pytest.raises(AssertionError):
        _validate(
            _multiple_required_fields_config_dict(),
            {"field_one": "yup", "field_two": "yup", "extra": "should_not_exist"},
        )
    with pytest.raises(AssertionError):
        _validate(
            _multiple_required_fields_config_dict(), {"field_one": "value_one", "field_two": 2}
        )
def test_single_optional_field_passing():
    # Optional field may be supplied or omitted; explicit None is rejected.
    assert _validate(_single_optional_string_config_dict(), {"optional_field": "value"}) == {
        "optional_field": "value"
    }
    assert _validate(_single_optional_string_config_dict(), {}) == {}
    with pytest.raises(AssertionError):
        assert _validate(_single_optional_string_config_dict(), {"optional_field": None}) == {
            "optional_field": None
        }


def test_single_optional_field_failing():
    # Wrong type or unknown key still fails even though the field is optional.
    with pytest.raises(AssertionError):
        _validate(_single_optional_string_config_dict(), {"optional_field": 1})
    with pytest.raises(AssertionError):
        _validate(_single_optional_string_config_dict(), {"dlkjfalksdjflksaj": 1})


def test_single_optional_field_passing_with_default():
    # Omitted optional field is populated with its default; explicit value overrides.
    assert _validate(_single_optional_string_field_config_dict_with_default(), {}) == {
        "optional_field": "some_default"
    }
    assert _validate(
        _single_optional_string_field_config_dict_with_default(), {"optional_field": "override"}
    ) == {"optional_field": "override"}


def test_permissive_multiple_required_fields_passing():
    # Permissive dicts pass through previously-unspecified keys untouched.
    assert _validate(
        _multiple_required_fields_config_permissive_dict(),
        {
            "field_one": "value_one",
            "field_two": "value_two",
            "previously_unspecified": "should_exist",
        },
    ) == {
        "field_one": "value_one",
        "field_two": "value_two",
        "previously_unspecified": "should_exist",
    }


def test_permissive_multiple_required_fields_nested_passing():
    # Unspecified keys may themselves hold nested dicts.
    assert _validate(
        _multiple_required_fields_config_permissive_dict(),
        {
            "field_one": "value_one",
            "field_two": "value_two",
            "previously_unspecified": {"nested": "value", "with_int": 2},
        },
    ) == {
        "field_one": "value_one",
        "field_two": "value_two",
        "previously_unspecified": {"nested": "value", "with_int": 2},
    }


def test_permissive_multiple_required_fields_failing():
    # Declared fields are still enforced inside a Permissive dict.
    with pytest.raises(AssertionError):
        _validate(_multiple_required_fields_config_permissive_dict(), {})
    with pytest.raises(AssertionError):
        _validate(_multiple_required_fields_config_permissive_dict(), {"field_one": "yup"})
    with pytest.raises(AssertionError):
        _validate(
            _multiple_required_fields_config_permissive_dict(),
            {"field_one": "value_one", "field_two": 2},
        )


def test_mixed_args_passing():
    # Defaults fill omitted optional args; optional-without-default stays absent.
    assert _validate(
        _mixed_required_optional_string_config_dict_with_default(),
        {"optional_arg": "value_one", "required_arg": "value_two"},
    ) == {"optional_arg": "value_one", "required_arg": "value_two"}
    assert _validate(
        _mixed_required_optional_string_config_dict_with_default(), {"required_arg": "value_two"}
    ) == {"optional_arg": "some_default", "required_arg": "value_two"}
    assert _validate(
        _mixed_required_optional_string_config_dict_with_default(),
        {"required_arg": "value_two", "optional_arg_no_default": "value_three"},
    ) == {
        "optional_arg": "some_default",
        "required_arg": "value_two",
        "optional_arg_no_default": "value_three",
    }
def _single_nested_config():
    """Schema fixture: a required nested dict with one Int field."""
    return convert_potential_field({"nested": {"int_field": Int}})


def _nested_optional_config_with_default():
    """Schema fixture: nested dict whose Int field is optional with default 3."""
    return convert_potential_field(
        {"nested": {"int_field": Field(Int, is_required=False, default_value=3)}}
    )


def _nested_optional_config_with_no_default():
    """Schema fixture: nested dict whose Int field is optional with no default."""
    return convert_potential_field({"nested": {"int_field": Field(Int, is_required=False)}})


def test_single_nested_config():
    # Nested values pass through unchanged.
    assert _validate(_single_nested_config(), {"nested": {"int_field": 2}}) == {
        "nested": {"int_field": 2}
    }


def test_single_nested_config_undefined_errors():
    # Nested errors report the full path (root:nested:...) in their messages.
    with pytest.raises(
        AssertionError,
        match='Value at path root:nested must be dict. Expected: "{ int_field: Int }".',
    ):
        _validate(_single_nested_config(), {"nested": "dkjfdk"})
    with pytest.raises(
        AssertionError,
        match='Invalid scalar at path root:nested:int_field. Value "dkjfdk" of type .* is not valid for expected type "Int".',
    ):
        _validate(_single_nested_config(), {"nested": {"int_field": "dkjfdk"}})
    with pytest.raises(
        AssertionError,
        match=(
            'Received unexpected config entry "not_a_field" at path root:nested. Expected: '
            '"{ int_field: Int }".'
        ),
    ):
        _validate(_single_nested_config(), {"nested": {"int_field": 2, "not_a_field": 1}})
    with pytest.raises(
        AssertionError,
        match="Invalid scalar at path root:nested:int_field. Value \"{'too_nested': 'dkjfdk'}\" of type .* is not valid for expected type \"Int\".",
    ):
        _validate(_single_nested_config(), {"nested": {"int_field": {"too_nested": "dkjfdk"}}})


def test_nested_optional_with_default():
    # Omitted nested optional field picks up its default.
    assert _validate(_nested_optional_config_with_default(), {"nested": {"int_field": 2}}) == {
        "nested": {"int_field": 2}
    }
    assert _validate(_nested_optional_config_with_default(), {"nested": {}}) == {
        "nested": {"int_field": 3}
    }


def test_nested_optional_with_no_default():
    # Without a default, an omitted optional field simply stays absent.
    assert _validate(_nested_optional_config_with_no_default(), {"nested": {"int_field": 2}}) == {
        "nested": {"int_field": 2}
    }
    assert _validate(_nested_optional_config_with_no_default(), {"nested": {}}) == {"nested": {}}
def test_config_defaults():
    # Composite solid defaults (a=1, b=2) combine with the provided c=3 to
    # give each child solid config sum == 6 via the mapping config_fn.
    @solid(config_schema={"sum": Int})
    def two(_context):
        assert _context.solid_config["sum"] == 6
        return _context.solid_config["sum"]

    @solid(config_schema={"sum": Int})
    def one(_context, prev_sum):
        assert prev_sum == 6
        return prev_sum + _context.solid_config["sum"]

    # addition_composite_solid
    def addition_composite_solid_config_fn(config):
        child_config = {"config": {"sum": config["a"] + config["b"] + config["c"]}}
        return {"one": child_config, "two": child_config}

    @composite_solid(
        config_fn=addition_composite_solid_config_fn,
        config_schema={
            "a": Field(Int, is_required=False, default_value=1),
            "b": Field(Int, is_required=False, default_value=2),
            "c": Int,
        },
    )
    def addition_composite_solid():
        return one(two())

    @pipeline
    def addition_pipeline():
        addition_composite_solid()

    # Only "c" is supplied; "a" and "b" fall back to their defaults.
    result = execute_pipeline(
        addition_pipeline, {"solids": {"addition_composite_solid": {"config": {"c": 3}}}}
    )
    assert result.success
def test_config_with_and_without_config():
    # The composite's config_fn maps its own prefix config onto the inner
    # solid; both an explicit prefix and the composite's default are covered.
    @solid(config_schema={"prefix": Field(str, is_required=False, default_value="_")})
    def prefix_value(context, v):
        return "{prefix}{v}".format(prefix=context.solid_config["prefix"], v=v)

    @composite_solid(
        config_fn=lambda cfg: {"prefix_value": {"config": {"prefix": cfg["prefix"]}}},
        config_schema={"prefix": Field(str, is_required=False, default_value="_id_")},
    )
    def prefix_id(val):
        return prefix_value(val)

    @solid
    def print_value(_, v):
        return str(v)

    @pipeline
    def config_issue_pipeline():
        v = prefix_id()
        print_value(v)

    # Explicit prefix overrides the composite default.
    result = execute_pipeline(
        config_issue_pipeline,
        {
            "solids": {
                "prefix_id": {
                    "config": {"prefix": "_customprefix_"},
                    "inputs": {"val": {"value": "12345"}},
                }
            }
        },
    )
    assert result.success
    assert result.result_for_solid("print_value").output_value() == "_customprefix_12345"

    # Empty config falls back to the composite's "_id_" default prefix.
    result_using_default = execute_pipeline(
        config_issue_pipeline,
        {"solids": {"prefix_id": {"config": {}, "inputs": {"val": {"value": "12345"}}}}},
    )
    assert result_using_default.success
    assert result_using_default.result_for_solid("print_value").output_value() == "_id_12345"
def test_build_optionality():
    # is_required propagates from Field declarations to the built config type.
    optional_test_type = convert_potential_field(
        {"required": {"value": String}, "optional": {"value": Field(String, is_required=False)},}
    ).config_type
    assert optional_test_type.fields["required"].is_required
    assert optional_test_type.fields["optional"].is_required is False


def test_wrong_solid_name():
    # Config addressed to a nonexistent solid name raises with a clear path.
    @solid(name="some_solid", input_defs=[], output_defs=[], config_schema=Int)
    def some_solid(_):
        return None

    @pipeline(name="pipeline_wrong_solid_name")
    def pipeline_def():
        some_solid()

    env_config = {"solids": {"another_name": {"config": {}}}}
    with pytest.raises(DagsterInvalidConfigError) as pe_info:
        execute_pipeline(pipeline_def, env_config)
    pe = pe_info.value
    assert 'Received unexpected config entry "another_name" at path root:solids' in str(pe)


def fail_me():
    # Helper: unconditionally fail an assertion when invoked.
    assert False


def dummy_resource(config_schema=None):
    """Return a no-op ResourceDefinition with the given config schema."""
    return ResourceDefinition(lambda _: None, config_schema=config_schema)


def test_wrong_resources():
    # Config addressed to an undeclared resource name raises with a clear path.
    pipeline_def = PipelineDefinition(
        name="pipeline_test_multiple_context",
        mode_defs=[
            ModeDefinition(
                resource_defs={"resource_one": dummy_resource(), "resource_two": dummy_resource()}
            )
        ],
        solid_defs=[],
    )
    with pytest.raises(
        DagsterInvalidConfigError,
        match='Received unexpected config entry "nope" at path root:resources',
    ):
        execute_pipeline(pipeline_def, {"resources": {"nope": {}}})


def test_solid_list_config():
    # A bare list config schema ([int]) accepts a list value directly.
    value = [1, 2]
    called = {}

    @solid(name="solid_list_config", input_defs=[], output_defs=[], config_schema=[int])
    def solid_list_config(context):
        assert context.solid_config == value
        called["yup"] = True

    @pipeline(name="solid_list_config_pipeline")
    def pipeline_def():
        solid_list_config()

    result = execute_pipeline(
        pipeline_def, run_config={"solids": {"solid_list_config": {"config": value}}}
    )
    assert result.success
    assert called["yup"]
def test_two_list_types():
    # The same two-list schema spelled three ways — python ints, dagster
    # Ints, and primitives again — must all round-trip identically.
    expected = {"list_one": [1], "list_two": [2]}

    @solid(
        input_defs=[], config_schema={"list_one": [int], "list_two": [int]},
    )
    def two_list_type(context):
        return context.solid_config

    assert execute_solid(
        two_list_type,
        run_config={"solids": {"two_list_type": {"config": expected}}},
    ).output_value() == expected

    @solid(
        input_defs=[], config_schema={"list_one": [Int], "list_two": [Int]},
    )
    def two_list_type_condensed_syntax(context):
        return context.solid_config

    assert execute_solid(
        two_list_type_condensed_syntax,
        run_config={
            "solids": {"two_list_type_condensed_syntax": {"config": expected}}
        },
    ).output_value() == expected

    @solid(
        input_defs=[], config_schema={"list_one": [int], "list_two": [int]},
    )
    def two_list_type_condensed_syntax_primitives(context):
        return context.solid_config

    assert execute_solid(
        two_list_type_condensed_syntax_primitives,
        run_config={
            "solids": {
                "two_list_type_condensed_syntax_primitives": {"config": expected}
            }
        },
    ).output_value() == expected
def test_multilevel_default_handling():
    # A default config value must kick in no matter how much of the
    # run-config tree the caller omits.
    @solid(config_schema=Field(Int, is_required=False, default_value=234))
    def has_default_value(context):
        assert context.solid_config == 234

    pipeline_def = PipelineDefinition(
        name="multilevel_default_handling", solid_defs=[has_default_value]
    )

    assert execute_pipeline(pipeline_def).success
    for run_config in (
        None,
        {},
        {"solids": {}},
        {"solids": {"has_default_value": {}}},
        {"solids": {"has_default_value": {"config": 234}}},
    ):
        assert execute_pipeline(pipeline_def, run_config=run_config).success
def test_no_env_missing_required_error_handling():
    # Executing with no run config at all reports the missing top-level
    # "solids" entry as a single MISSING_REQUIRED_FIELD error.
    @solid(config_schema=Int)
    def required_int_solid(_context):
        pass

    pipeline_def = PipelineDefinition(
        name="no_env_missing_required_error", solid_defs=[required_int_solid]
    )

    with pytest.raises(DagsterInvalidConfigError) as exc_info:
        execute_pipeline(pipeline_def)

    err = exc_info.value
    assert isinstance(err, DagsterInvalidConfigError)
    assert len(err.errors) == 1
    only_error = err.errors[0]
    assert only_error.reason == DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD
    assert only_error.message == 'Missing required config entry "solids" at the root.'
def test_root_extra_field():
    # An unexpected key at the root of the run config is FIELD_NOT_DEFINED.
    @solid(config_schema=Int)
    def required_int_solid(_context):
        pass

    @pipeline
    def pipeline_def():
        required_int_solid()

    with pytest.raises(DagsterInvalidConfigError) as exc_info:
        execute_pipeline(
            pipeline_def,
            run_config={"solids": {"required_int_solid": {"config": 948594}}, "nope": None},
        )

    err = exc_info.value
    assert len(err.errors) == 1
    assert err.errors[0].reason == DagsterEvaluationErrorReason.FIELD_NOT_DEFINED
    assert 'Received unexpected config entry "nope"' in err.message
def test_deeper_path():
    # A type mismatch below the root (string where Int expected) surfaces
    # as a single RUNTIME_TYPE_MISMATCH error.
    @solid(config_schema=Int)
    def required_int_solid(_context):
        pass

    @pipeline
    def pipeline_def():
        required_int_solid()

    with pytest.raises(DagsterInvalidConfigError) as exc_info:
        execute_pipeline(
            pipeline_def, run_config={"solids": {"required_int_solid": {"config": "asdf"}}}
        )

    err = exc_info.value
    assert len(err.errors) == 1
    assert err.errors[0].reason == DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH
def test_working_list_path():
    # A valid list config round-trips to the solid unchanged.
    invocations = {}

    @solid(config_schema=[int])
    def required_list_int_solid(context):
        assert context.solid_config == [1, 2]
        invocations["yup"] = True

    @pipeline
    def pipeline_def():
        required_list_int_solid()

    run_result = execute_pipeline(
        pipeline_def, run_config={"solids": {"required_list_int_solid": {"config": [1, 2]}}}
    )
    assert run_result.success
    assert invocations["yup"]
def test_item_error_list_path():
    # A bad element inside a config list is reported with its index in the
    # error path (…:config[1]).
    invocations = {}

    @solid(config_schema=[int])
    def required_list_int_solid(context):
        assert context.solid_config == [1, 2]
        invocations["yup"] = True

    @pipeline
    def pipeline_def():
        required_list_int_solid()

    with pytest.raises(DagsterInvalidConfigError) as exc_info:
        execute_pipeline(
            pipeline_def,
            run_config={"solids": {"required_list_int_solid": {"config": [1, "nope"]}}},
        )

    err = exc_info.value
    assert len(err.errors) == 1
    assert err.errors[0].reason == DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH
    assert "Invalid scalar at path root:solids:required_list_int_solid:config[1]" in str(err)
def test_list_in_config_error():
    # dagster.List is a runtime type; using it as a config schema must be
    # rejected with a pointer at the python-list / Array alternatives.
    expected_message = (
        "Cannot use List in the context of config. "
        "Please use a python list (e.g. [int]) or dagster.Array (e.g. Array(int)) instead."
    )
    with pytest.raises(DagsterInvalidDefinitionError, match=re.escape(expected_message)):

        @solid(config_schema=List[int])
        def _no_runtime_list_in_config(_):
            pass
def test_required_resource_not_given():
    # "resources": None and "resources": {} fail differently: the former as
    # a must-not-be-None violation, the latter as a missing required field.
    @pipeline(
        name="required_resource_not_given",
        mode_defs=[ModeDefinition(resource_defs={"required": dummy_resource(Int)})],
    )
    def pipeline_def():
        pass

    with pytest.raises(DagsterInvalidConfigError) as none_exc_info:
        execute_pipeline(pipeline_def, run_config={"resources": None})
    none_errors = none_exc_info.value.errors
    assert len(none_errors) == 1
    assert "Value at path root:resources must not be None." in none_errors[0].message

    with pytest.raises(DagsterInvalidConfigError) as exc_info:
        execute_pipeline(pipeline_def, run_config={"resources": {}})
    missing_error = exc_info.value.errors[0]
    assert missing_error.reason == DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD
    assert (
        missing_error.message
        == 'Missing required config entry "required" at path root:resources.'
    )
def test_multilevel_good_error_handling_solids():
    # "solids": None and "solids": {} are distinct failures: a not-None
    # violation versus a missing required solid entry.
    @solid(config_schema=Int)
    def good_error_handling(_context):
        pass

    @pipeline
    def pipeline_def():
        good_error_handling()

    with pytest.raises(DagsterInvalidConfigError) as none_exc_info:
        execute_pipeline(pipeline_def, run_config={"solids": None})
    assert len(none_exc_info.value.errors) == 1
    assert (
        "Value at path root:solids must not be None."
        in none_exc_info.value.errors[0].message
    )

    with pytest.raises(DagsterInvalidConfigError) as missing_exc_info:
        execute_pipeline(pipeline_def, run_config={"solids": {}})
    assert len(missing_exc_info.value.errors) == 1
    assert missing_exc_info.value.errors[0].message == (
        'Missing required config entry "good_error_handling" at path root:solids.'
    )
def test_multilevel_good_error_handling_solid_name_solids():
    # A solid entry that is present but lacks its "config" key reports
    # exactly one missing-field error at the solid's path.
    @solid(config_schema=Int)
    def good_error_handling(_context):
        pass

    @pipeline
    def pipeline_def():
        good_error_handling()

    with pytest.raises(DagsterInvalidConfigError) as exc_info:
        execute_pipeline(pipeline_def, run_config={"solids": {"good_error_handling": {}}})

    assert len(exc_info.value.errors) == 1
    assert exc_info.value.errors[0].message == (
        'Missing required config entry "config" at path root:solids:good_error_handling.'
    )
def test_multilevel_good_error_handling_config_solids_name_solids():
    # Noneable config accepts an explicit None value without raising.
    @solid(config_schema=Noneable(int))
    def good_error_handling(_context):
        pass

    @pipeline
    def pipeline_def():
        good_error_handling()

    execute_pipeline(
        pipeline_def,
        run_config={"solids": {"good_error_handling": {"config": None}}},
    )
def test_invalid_default_values():
    # A Field default that violates the declared type fails at definition
    # time, not at execution time.
    with pytest.raises(
        DagsterInvalidConfigError,
        match='Value "3" of type .* is not valid for expected type "Int"',
    ):

        @solid(config_schema=Field(Int, default_value="3"))
        def _solid(_):
            pass
def test_typing_types_into_config():
    # Types from the `typing` module are rejected by the config system,
    # whether passed bare or wrapped in a Field.
    match_str = re.escape(
        "You have passed in typing.List to the config system. "
        "Types from the typing module in python are not allowed "
        "in the config system. You must use types that are imported "
        "from dagster or primitive types such as bool, int, etc."
    )
    for schema in (Field(typing.List), typing.List):
        with pytest.raises(DagsterInvalidDefinitionError, match=match_str):

            @solid(config_schema=schema)
            def _solid(_):
                pass

    match_str = re.escape(
        "You have passed in typing.List[int] to the config system. Types "
        "from the typing module in python are not allowed in the config system. "
        "You must use types that are imported from dagster or primitive types "
        "such as bool, int, etc."
    )
    for schema in (Field(typing.List[int]), typing.List[int]):
        with pytest.raises(DagsterInvalidDefinitionError, match=match_str):

            @solid(config_schema=schema)
            def _solid(_):
                pass

    # The remaining typing constructs all fail too (message not asserted).
    for ttype in (
        typing.Optional[int],
        typing.Set,
        typing.Set[int],
        typing.Dict,
        typing.Dict[int, str],
        typing.Tuple,
        typing.Tuple[int, int],
    ):
        with pytest.raises(DagsterInvalidDefinitionError):

            @solid(config_schema=Field(ttype))
            def _solid(_):
                pass
def test_no_set_in_config_system():
    # Set — open or closed, bare or wrapped in Field — is not a config type.
    set_error_msg = re.escape("Cannot use Set in the context of a config field.")
    for schema in (Field(Set), Set, Field(Set[int]), Set[int]):
        with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):

            @solid(config_schema=schema)
            def _set_solid(_):
                pass
def test_no_tuple_in_config_system():
    # Tuple — open or closed — is not a config type either.
    tuple_error_msg = re.escape("Cannot use Tuple in the context of a config field.")
    for schema in (Field(Tuple), Field(Tuple[int])):
        with pytest.raises(DagsterInvalidDefinitionError, match=tuple_error_msg):

            @solid(config_schema=schema)
            def _tuple_solid(_):
                pass
def test_field_is_none():
    # A None entry inside a dict config schema is an invalid definition.
    with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:

        @solid(config_schema={"none_field": None})
        def _none_is_bad(_):
            pass

    assert "Fields cannot be None" in str(exc_info.value)
|
import tensorflow as tf
from openrec.legacy.modules.interactions import Interaction
class NsLog(Interaction):
    """Negative-sampling interaction with a rank-weighted logistic loss.

    Training mode consumes one positive item and `neg_num` sampled negative
    items per user; serving mode scores every item for the given users.
    Legacy TF1 graph-mode code (uses `tf.variable_scope`, `reduction_indices`,
    `tf.to_float`, etc.).
    """
    def __init__(
        self,
        user,
        max_item,
        item=None,
        item_bias=None,
        p_item=None,
        p_item_bias=None,
        neg_num=5,
        n_item=None,
        n_item_bias=None,
        train=None,
        scope=None,
        reuse=False,
    ):
        """Validate and store the tensors needed for the requested graph.

        :param user: user embedding tensor — presumably [batch, dim]; TODO confirm
        :param max_item: total item count, used to scale the rank-based weight
        :param item, item_bias: full-catalog embeddings/biases (serving only)
        :param p_item, p_item_bias: positive-item embeddings/biases (training)
        :param n_item, n_item_bias: negative-item embeddings/biases (training)
        :param neg_num: number of sampled negatives per positive
        :param train: True builds the training graph, False the serving graph
        :param scope, reuse: TF variable-scope name and reuse flag
        """
        assert train is not None, "train cannot be None"
        assert user is not None, "user cannot be None"
        self._user = user
        self._neg_num = neg_num
        self._max_item = max_item
        if train:
            # Training requires both the positive and the sampled negatives.
            assert p_item is not None, "p_item cannot be None"
            assert n_item is not None, "n_item cannot be None"
            assert p_item_bias is not None, "p_item_bias cannot be None"
            assert n_item_bias is not None, "n_item_bias cannot be None"
            self._p_item = p_item
            self._n_item = n_item
            self._p_item_bias = p_item_bias
            self._n_item_bias = n_item_bias
        else:
            # Serving scores the whole item catalog instead.
            assert item is not None, "item cannot be None"
            assert item_bias is not None, "item_bias cannot be None"
            self._item = item
            self._item_bias = item_bias
        super(NsLog, self).__init__(train=train, scope=scope, reuse=reuse)
    def _build_training_graph(self):
        """Build the rank-weighted negative-sampling log loss into self._loss."""
        with tf.variable_scope(self._scope, reuse=self._reuse):
            # Replicate each user vector once per negative sample.
            tmp_user = tf.tile(tf.expand_dims(self._user, 1), [1, self._neg_num, 1])
            # user·positive dot products, tiled to align with the negatives.
            dot_user_pos = tf.tile(
                tf.reduce_sum(
                    tf.multiply(self._user, self._p_item),
                    reduction_indices=1,
                    keep_dims=True,
                    name="dot_user_pos",
                ),
                [1, self._neg_num],
            )
            # user·negative dot products, one per sampled negative.
            dot_user_neg = tf.reduce_sum(
                tf.multiply(tmp_user, self._n_item),
                reduction_indices=2,
                name="dot_user_neg",
            )
            pos_score = dot_user_pos + tf.tile(self._p_item_bias, [1, self._neg_num])
            neg_score = dot_user_neg + tf.reduce_sum(
                self._n_item_bias, reduction_indices=2
            )
            # diff > 0 means the positive outranks that negative.
            diff = pos_score - neg_score
            # Number of negatives that beat the positive — an estimate of the
            # positive item's rank, rescaled by the catalog size.
            weights = tf.count_nonzero(tf.less(diff, 0.0), axis=1)
            weights = tf.log(
                tf.floor(self._max_item * tf.to_float(weights) / self._neg_num) + 1.0
            )
            # Weighted logistic loss on the worst (smallest) margin; the inner
            # maximum with -30 guards log(sigmoid(.)) against underflow.
            self._loss = -tf.reduce_sum(
                tf.log(
                    tf.sigmoid(tf.maximum(weights * tf.reduce_min(diff, axis=1), -30.0))
                )
            )
    def _build_serving_graph(self):
        """Score all catalog items for each user: user @ itemsᵀ + item bias."""
        with tf.variable_scope(self._scope, reuse=self._reuse):
            self._outputs.append(
                tf.matmul(self._user, self._item, transpose_b=True)
                + tf.reshape(self._item_bias, [-1])
            )
|
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, BatchNormalization, Conv2D, MaxPool2D, GlobalMaxPool2D, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import Model
from tensorflow.keras import layers, models
from ai_config import *
def new_model():
    """Build and compile a small CNN classifier.

    Maps (H, W, C) images to a (D, N_LABELS) output — D label positions with
    N_LABELS classes each (all constants come from ai_config).  Prints a
    summary and returns the compiled Keras model.
    """
    input_layer = tf.keras.Input(shape=(H, W, C))
    # Three conv/pool stages for feature extraction.
    x = layers.Conv2D(32, 3, activation='relu')(input_layer)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Flatten()(x)
    x = layers.Dense(1024, activation='relu')(x)
    # x = layers.Dropout(0.5)(x)
    # NOTE(review): softmax is applied over the flattened D * N_LABELS units
    # *before* the reshape, so probabilities are normalized jointly across all
    # D positions rather than per position — confirm this is intended.
    x = layers.Dense(D * N_LABELS, activation='softmax')(x)
    x = layers.Reshape((D, N_LABELS))(x)
    model = models.Model(inputs=input_layer, outputs=x)
    model.compile(optimizer='adam',
            loss='categorical_crossentropy',
            metrics= ['accuracy'])
    model.summary()
    return(model)
def load_model(target):
    """Load a previously saved Keras model from `target`, print its summary,
    and return it."""
    loaded = tf.keras.models.load_model(target)
    loaded.summary()
    return loaded
# include 1 model
|
"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\ | | /
.....
\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
The full def is $$\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.:
"""
import torch
import torch.nn as nn
import math
class GlobalAttention(nn.Module):
    """Global attention over a context matrix.

    Given a query `input` of shape (batch, dim) and a `context` of shape
    (batch, sourceL, dim):

        score  = context @ (W_in q)        # (batch, sourceL)
        attn   = softmax(score)            # normalized over sourceL
        mix    = attn @ context            # (batch, dim)
        output = tanh(W_out [mix; q])      # (batch, dim)

    Returns (output, attn).
    """

    def __init__(self, dim):
        """:param dim: dimensionality of both the query and context vectors."""
        super(GlobalAttention, self).__init__()
        self.linear_in = nn.Linear(dim, dim, bias=False)
        # Fix: nn.Softmax() with no `dim` is deprecated and relies on legacy
        # implicit-dimension selection.  The scores here are 2-D
        # (batch x sourceL), for which the legacy behavior normalizes over
        # dim 1 — so dim=1 is explicit and behavior-identical.
        self.sm = nn.Softmax(dim=1)
        self.linear_out = nn.Linear(dim * 2, dim, bias=False)
        self.tanh = nn.Tanh()
        # Optional boolean mask of excluded source positions (see applyMask).
        self.mask = None

    def applyMask(self, mask):
        """Set a mask of source positions to exclude from attention
        (nonzero/True entries are masked out on the next forward call)."""
        self.mask = mask

    def forward(self, input, context):
        """
        input: batch x dim
        context: batch x sourceL x dim
        """
        targetT = self.linear_in(input).unsqueeze(2)  # batch x dim x 1
        # Raw attention scores: each source vector dotted with the
        # transformed query.
        attn = torch.bmm(context, targetT).squeeze(2)  # batch x sourceL
        if self.mask is not None:
            # Masked positions get -inf so softmax assigns them zero weight.
            attn.data.masked_fill_(self.mask.view(-1, attn.shape[-1]), -float('inf'))
        attn = self.sm(attn)
        attn3 = attn.view(attn.size(0), 1, attn.size(1))  # batch x 1 x sourceL
        weightedContext = torch.bmm(attn3, context).squeeze(1)  # batch x dim
        contextCombined = torch.cat((weightedContext, input), 1)
        contextOutput = self.tanh(self.linear_out(contextCombined))
        return contextOutput, attn
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start supernodecoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on supernodecoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create supernodecoinds that connect to them
- Manipulate the supernodecoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import SupernodeCoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(SupernodeCoinTestFramework):
    """Functional test of the node's SOCKS5 proxy configuration options
    (-proxy, -onion, -proxyrandomize), driven through stub SOCKS5 servers."""
    def set_test_params(self):
        # Four nodes, one per proxy configuration under test.
        self.num_nodes = 4
    def setup_nodes(self):
        """Start the stub SOCKS5 proxies and launch nodes pointing at them."""
        self.have_ipv6 = test_ipv6_local()
        # Create two proxies on different ports
        # ... one unauthenticated
        # Port offset by PID so parallel test runs don't collide.
        self.conf1 = Socks5Configuration()
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        if self.have_ipv6:
            # ... one on IPv6 with similar configuration
            self.conf3 = Socks5Configuration()
            self.conf3.af = socket.AF_INET6
            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
            self.conf3.unauth = True
            self.conf3.auth = True
        else:
            self.log.warning("Testing without local IPv6 support")
        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        if self.have_ipv6:
            self.serv3 = Socks5Server(self.conf3)
            self.serv3.start()
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        # Node 0: everything through proxy 1, randomized credentials.
        # Node 1: proxy 1 for clearnet, proxy 2 for onion, no randomization.
        # Node 2: everything through the authenticated proxy 2, randomized.
        # Node 3: IPv6 proxy (only if available, filled in below).
        args = [
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
        self.add_nodes(self.num_nodes, extra_args=args)
        self.start_nodes()
    def node_test(self, node, proxies, auth, test_onion=True):
        """Drive one node through IPv4/IPv6/onion/DNS addnode attempts and
        verify each lands on the expected proxy with the expected SOCKS5
        command.  Returns the list of observed Socks5Command objects."""
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        if self.have_ipv6:
            # Test: outgoing IPv6 connection through node
            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
            cmd = proxies[1].queue.get()
            assert(isinstance(cmd, Socks5Command))
            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
            assert_equal(cmd.port, 5443)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        if test_onion:
            # Test: outgoing onion connection through node
            node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
            cmd = proxies[2].queue.get()
            assert(isinstance(cmd, Socks5Command))
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
            assert_equal(cmd.port, 8333)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:8333", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"node.noumenon")
        assert_equal(cmd.port, 8333)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        return rv
    def run_test(self):
        """Exercise every node's proxy setup, then verify getnetworkinfo
        reports the configured proxies and randomization flags."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), len(rv))
        if self.have_ipv6:
            # proxy on IPv6 localhost
            self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
        def networks_dict(d):
            # Re-key getnetworkinfo's 'networks' list by network name.
            r = {}
            for x in d['networks']:
                r[x['name']] = x
            return r
        # test RPC getnetworkinfo
        n0 = networks_dict(self.nodes[0].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n0[net]['proxy_randomize_credentials'], True)
        assert_equal(n0['onion']['reachable'], True)
        n1 = networks_dict(self.nodes[1].getnetworkinfo())
        for net in ['ipv4','ipv6']:
            assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n1[net]['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
        assert_equal(n1['onion']['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['reachable'], True)
        n2 = networks_dict(self.nodes[2].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
            assert_equal(n2[net]['proxy_randomize_credentials'], True)
        assert_equal(n2['onion']['reachable'], True)
        if self.have_ipv6:
            n3 = networks_dict(self.nodes[3].getnetworkinfo())
            for net in ['ipv4','ipv6']:
                assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
                assert_equal(n3[net]['proxy_randomize_credentials'], False)
            # Node 3 runs with -noonion, so onion must be unreachable.
            assert_equal(n3['onion']['reachable'], False)
# Script entry point: run the proxy test suite directly.
if __name__ == '__main__':
    ProxyTest().main()
|
import os
# Application configuration for the image micro-service.
SETTINGS = {
    # Service metadata (reported as-is by the service).
    "INFO": {
        "name": "Emotion",
        "info": "The image micro-service",
        "version": "1.0.0",
    },
    # Cassandra connection settings.
    "CASSANDRA": {
        "HOST": ["localhost", ],
        "KEYSPACE": "emotion",
    },
    "BASE_URL": "http://localhost:5000",
    # NOTE(review): os.path.basename() on a bare name returns the name itself,
    # so these are just the relative directory names 'uploads' and 'cache' —
    # confirm an absolute/joined path was not intended.
    "UPLOAD_FOLDER": os.path.basename('uploads'),
    "CACHE_FOLDER": os.path.basename('cache'),
    "ALLOWED_EXTENSIONS": ['png', 'jpg', 'jpeg', 'gif'],
    # Request size cap: 16 MiB.
    "MAX_CONTENT_LENGTH": 16 * 1024 * 1024,
}
|
from django.apps import AppConfig
class AbesitConfig(AppConfig):
    """Django application configuration for the 'abesit' app."""
    name = 'abesit'
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from get_data import MasterData
from get_data import my_grid_search
from sklearn.preprocessing import MinMaxScaler
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error
# Train an MLP regressor on the Boston dataset and compare its predictions
# against the held-out evaluation targets.
md = MasterData('Boston')
scaler = MinMaxScaler()
# Scale the data: fit the scaler on the training split only, then apply the
# *same* transformation to the evaluation split.  (Bug fix: the scaler was
# previously re-fit on the evaluation data via fit_transform, which leaks
# eval statistics and scales the two splits inconsistently.)
X_train = pd.DataFrame(data=scaler.fit_transform(md.x_train), columns=md.x_train.columns, index=md.x_train.index)
X_test = pd.DataFrame(data=scaler.transform(md.x_eval), columns=md.x_eval.columns, index=md.x_eval.index)
model = MLPRegressor(hidden_layer_sizes=(13, 13, 13), activation='logistic', alpha=0.0001, solver='lbfgs')
# do grid search on model
# my_grid_search(model,X_train, md.y_train)
model.fit(X_train, md.y_train)
predictions = model.predict(X_test)
pred_df = pd.DataFrame(data=predictions)
# RMSE of the predictions on the evaluation set.
output = mean_squared_error(md.y_eval, pred_df) ** 0.5
# Side-by-side comparison of actual vs. predicted values.
comparison = pd.concat([md.y_eval_reset, pred_df], axis=1)
comparison.columns = ['original_y', 'predicted_target']
comparison['diff'] = comparison['original_y'] - comparison['predicted_target']
|
from django.db import models
from users.models import CustomUser
from classes.models import Classroom
from materials.models import Materials
import os
class ReadingTime(models.Model):
    """Records how long a student spent reading a material in a classroom."""
    id = models.AutoField(primary_key = True)
    classroom = models.ForeignKey(Classroom, on_delete = models.CASCADE)
    student = models.ForeignKey(CustomUser, on_delete = models.CASCADE)
    material = models.ForeignKey(Materials, on_delete = models.CASCADE)
    # Reading duration — unit not stated here (presumably seconds); TODO confirm.
    duration = models.IntegerField()
    # Set once at row creation (auto_now_add), despite the name.
    updating_time = models.DateTimeField(auto_now_add = True)
    def getMatId(self):
        """Delegate to the related material's getMatId()."""
        return self.material.getMatId()
    def __str__(self):
        # Concatenated classroom/material/student string representations.
        return str(self.classroom) + str(self.material) + str(self.student)
    class Meta:
        ordering = ['classroom', 'material', 'student', 'updating_time', 'duration']
# content_type 1 --> material
# content_type 2 --> joined class
# content_type 3 --> created class
class ContentFrequency(models.Model):
    """Counts how often a user accessed a piece of content.

    content_type encodes what content_id refers to (per the mapping comment
    above: 1 = material, 2 = joined class, 3 = created class).
    """
    id = models.AutoField(primary_key = True)
    # Id of the referenced content; interpretation depends on content_type.
    content_id = models.IntegerField()
    content_type = models.IntegerField()
    user = models.ForeignKey(CustomUser, on_delete = models.CASCADE)
    # Number of accesses recorded for this user/content pair.
    frequency = models.IntegerField()
    name = models.CharField(max_length = 400)
    def __str__(self):
        return str(self.user) + " " + str(self.content_type) + " " + str(self.name) + " " + str(self.frequency)
    class Meta:
        ordering = ['content_type', 'frequency']
|
# -*- coding: utf-8 -*-
"""
A module containing all code for working with Clipboard
"""
from collections import OrderedDict
from nodeeditor.node_graphics_edge import QDMGraphicsEdge
from nodeeditor.node_edge import Edge
import keyboard
DEBUG = False
DEBUG_PASTING = False
class SceneClipboard():
    """
    Class contains all the code for serialization/deserialization from Clipboard
    """
    def __init__(self, scene:'Scene'):
        """
        :param scene: Reference to the :class:`~nodeeditor.node_scene.Scene`
        :type scene: :class:`~nodeeditor.node_scene.Scene`
        :Instance Attributes:
        - **scene** - reference to the :class:`~nodeeditor.node_scene.Scene`
        """
        self.scene = scene
    def serializeSelected(self, delete:bool=False) -> OrderedDict:
        """
        Serializes selected items in the Scene into ``OrderedDict``
        :param delete: True if you want to delete selected items after serialization. Useful for Cut operation
        :type delete: ``bool``
        :return: Serialized data of current selection in NodeEditor :class:`~nodeeditor.node_scene.Scene`
        """
        if DEBUG: print("-- COPY TO CLIPBOARD ---")
        sel_nodes, sel_edges, sel_sockets = [], [], {}
        # sort edges and nodes
        for item in self.scene.grScene.selectedItems():
            if hasattr(item, 'node'):
                # Graphics item wrapping a node: serialize it and index all
                # of its sockets so edge endpoints can be validated below.
                sel_nodes.append(item.node.serialize())
                for socket in (item.node.inputs + item.node.outputs):
                    sel_sockets[socket.id] = socket
            elif isinstance(item, QDMGraphicsEdge):
                sel_edges.append(item.edge)
        # debug
        if DEBUG:
            print("  NODES\n      ", sel_nodes)
            print("  EDGES\n      ", sel_edges)
            print("  SOCKETS\n     ", sel_sockets)
        # remove all edges which are not connected to a nodeeditor in our list
        edges_to_remove = []
        for edge in sel_edges:
            if edge.start_socket.id in sel_sockets and edge.end_socket.id in sel_sockets:
                # if DEBUG: print(" edge is ok, connected with both sides")
                pass
            else:
                if DEBUG: print("edge", edge, "is not connected with both sides")
                edges_to_remove.append(edge)
        for edge in edges_to_remove:
            sel_edges.remove(edge)
        # make final list of edges
        edges_final = []
        for edge in sel_edges:
            edges_final.append(edge.serialize())
        if DEBUG: print("our final edge list:", edges_final)
        data = OrderedDict([
            ('nodes', sel_nodes),
            ('edges', edges_final),
        ])
        # if CUT (aka delete) remove selected items
        if delete:
            self.scene.getView().deleteSelected()
            # store our history
            self.scene.history.storeHistory("Cut out elements from scene", setModified=True)
        return data
    def deserializeFromClipboard(self, data:dict, *args, **kwargs):
        """
        Deserializes data from Clipboard.
        :param data: ``dict`` data for deserialization to the :class:`nodeeditor.node_scene.Scene`.
        :type data: ``dict``
        :return: list of newly created nodes, positioned relative to the mouse
        """
        hashmap = {}
        # calculate mouse pointer - scene position
        view = self.scene.getView()
        mouse_scene_pos = view.last_scene_mouse_position
        # calculate selected objects bbox and center
        minx, maxx, miny, maxy = 10000000,-10000000, 10000000,-10000000
        for node_data in data['nodes']:
            x, y = node_data['pos_x'], node_data['pos_y']
            if x < minx: minx = x
            if x > maxx: maxx = x
            if y < miny: miny = y
            if y > maxy: maxy = y
        # add width and height of a node
        # NOTE(review): the comment above says "add", but maxx is *reduced*
        # by 180 while maxy grows by 100 — confirm the minus sign is intended.
        maxx -= 180
        maxy += 100
        relbboxcenterx = (minx + maxx) / 2 - minx
        relbboxcentery = (miny + maxy) / 2 - miny
        if DEBUG_PASTING:
            print (" *** PASTA:")
            print("Copied boudaries:\n\tX:", minx, maxx, " Y:", miny, maxy)
            print("\tbbox_center:", relbboxcenterx, relbboxcentery)
        # calculate the offset of the newly creating nodes
        mousex, mousey = mouse_scene_pos.x(), mouse_scene_pos.y()
        # create each node
        created_nodes = []
        # Suppress per-item selection events while pasting; re-enabled below.
        self.scene.setSilentSelectionEvents()
        self.scene.doDeselectItems()
        for node_data in data['nodes']:
            new_node = self.scene.getNodeClassFromData(node_data)(self.scene)
            new_node.deserialize(node_data, hashmap, restore_id=False, *args, **kwargs)
            created_nodes.append(new_node)
            # readjust the new nodeeditor's position
            # new node's current position
            posx, posy = new_node.pos.x(), new_node.pos.y()
            newx, newy = mousex + posx - minx, mousey + posy - miny
            new_node.setPos(newx, newy)
            new_node.doSelect()
            if DEBUG_PASTING:
                print("** PASTA SUM:")
                print("\tMouse pos:", mousex, mousey)
                print("\tnew node pos:", posx, posy)
                print("\tFINAL:", newx, newy)
        # create each edge
        if 'edges' in data:
            for edge_data in data['edges']:
                new_edge = Edge(self.scene)
                new_edge.deserialize(edge_data, hashmap, restore_id=False, *args, **kwargs)
        self.scene.setSilentSelectionEvents(False)
        # store history
        self.scene.history.storeHistory("Pasted elements in scene", setModified=True)
        return created_nodes
|
# Copyright (c) 2018-2021 Patricio Cubillos.
# bibmanager is open-source software under the MIT license (see LICENSE).
__all__ = [
'browse',
]
import re
import os
from asyncio import Future, ensure_future
import io
from contextlib import redirect_stdout
import textwrap
import webbrowser
from prompt_toolkit import print_formatted_text, search
from prompt_toolkit.application import Application
from prompt_toolkit.application.current import get_app
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.completion import PathCompleter, WordCompleter
from prompt_toolkit.filters import Condition
from prompt_toolkit.formatted_text import PygmentsTokens
from prompt_toolkit.formatted_text.utils import fragment_list_to_text
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.scroll import (
scroll_half_page_down, scroll_half_page_up,)
from prompt_toolkit.layout.containers import (
Float, FloatContainer, HSplit, VSplit, Window, WindowAlign,)
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.dimension import D
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout.menus import CompletionsMenu
from prompt_toolkit.layout.processors import Transformation, Processor
from prompt_toolkit.layout.utils import explode_text_fragments
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.selection import PasteMode
from prompt_toolkit.styles import Style, style_from_pygments_cls, merge_styles
from prompt_toolkit.widgets import (
Button, Dialog, Label, SearchToolbar, TextArea,)
import pygments
from pygments.lexers.bibtex import BibTeXLexer
from . import bib_manager as bm
from .. import config_manager as cm
from .. import pdf_manager as pm
from .. import utils as u
from ..__init__ import __version__ as ver
help_message = f"""\
h Show this message
enter Select/unselect entry for saving
s Save selected entries to file or screen output
f,/,? Start forward (f or /) or reverse (?) search
e Expand/collapse content of current entry
E Expand/collapse all entries
o Open PDF of entry (ask to fetch if needed)
b Open entry in ADS throught the web browser
q Quit
Navigation
Arrow keys Move up, down, left, and right
g/G Go to first/last line
u/d Scroll up/down
n/N Go to next/previous search occurrence
This is bibmanager version {ver}
Created by Patricio Cubillos."""
class TextInputDialog:
    """Modal dialog with a single-line text input.

    The entered text is delivered via ``self.future``; accepting an empty
    string additionally exits the application.
    """
    def __init__(self, title="", label_text="", completer=None):
        self.future = Future()
        def accept_text(buf):
            # Accept handler for the TextArea: resolve the future with the
            # typed text; an empty input exits the whole application.
            buf.complete_state = None
            self.future.set_result(self.text_area.text)
            if self.text_area.text == "":
                get_app().exit()
        self.text_area = TextArea(
            completer=completer,
            multiline=False,
            width=D(preferred=40),
            accept_handler=accept_text,
        )
        self.dialog = Dialog(
            title=title,
            body=HSplit([
                Label(text=label_text), self.text_area, Label(text="")]),
            width=D(preferred=75),
            modal=True,
        )
    def __pt_container__(self):
        # prompt_toolkit widget protocol: expose the root container.
        return self.dialog
class MessageDialog:
    """Modal dialog showing a text message.

    With ``asking=False`` shows a single OK button (future resolves to None);
    with ``asking=True`` shows Yes/No buttons (future resolves to True/False).
    """
    def __init__(self, title, text, asking=False):
        self.future = Future()
        def set_done():
            self.future.set_result(None)
        def accept():
            self.future.set_result(True)
        def cancel():
            self.future.set_result(False)
        if asking:
            buttons = [
                Button(text="Yes", handler=accept),
                Button(text="No", handler=cancel),
            ]
        else:
            buttons = [Button(text="OK", handler=set_done)]
        # Wrap long lines so the text fits inside the 75-column dialog.
        text = "\n".join([
            textwrap.fill(line, width=71)
            for line in text.splitlines()
        ])
        self.dialog = Dialog(
            title=title,
            body=HSplit([Label(text=text)]),
            buttons=buttons,
            width=D(preferred=75),
            modal=True,
        )
    def __pt_container__(self):
        # prompt_toolkit widget protocol: expose the root container.
        return self.dialog
def show_message(title, text):
    """Fire-and-forget popup of a MessageDialog on the running app."""
    async def _popup():
        await show_dialog_as_float(MessageDialog(title, text))
    ensure_future(_popup())
async def show_dialog_as_float(dialog):
    """Display *dialog* as a float on top of the current layout.

    Focus moves to the dialog until its future resolves; the previous
    focus and the float stack are then restored.  Returns the result
    of the dialog's future.
    """
    app = get_app()
    root = app.layout.container
    dialog_float = Float(content=dialog)
    root.floats.insert(0, dialog_float)
    previous_focus = app.layout.current_window
    app.layout.focus(dialog)
    # Tag the focused window so key bindings (e.g. control-c) can reach
    # the dialog's future.
    app.layout.current_window.dialog = dialog
    result = await dialog.future
    if hasattr(app.layout.current_window, 'dialog'):
        del app.layout.current_window.dialog
    app.layout.focus(previous_focus)
    if dialog_float in root.floats:
        root.floats.remove(dialog_float)
    return result
class HighlightEntryProcessor(Processor):
    """Processor to highlight a list of texts in the document."""
    match_fragment = " class:search "

    def __init__(self):
        super().__init__()
        # Per-instance list of highlighted keys.  Previously this was a
        # mutable class attribute, which would have been shared across
        # all instances of the processor.
        self.selected_entries = []

    def toggle_selected_entry(self, entry_key):
        """Select/deselect entry_key from the list of highlighted texts."""
        if entry_key in self.selected_entries:
            self.selected_entries.remove(entry_key)
        else:
            self.selected_entries.append(entry_key)

    def apply_transformation(self, transformation_input):
        """Append the search style to fragments matching a selected key."""
        (
            buffer_control,
            document,
            lineno,
            source_to_display,
            fragments,
            _,
            _,
        ) = transformation_input.unpack()
        if self.selected_entries and not get_app().is_done:
            # For each occurrence of a selected key, extend the fragment's
            # style string with the search style.
            line_text = fragment_list_to_text(fragments)
            fragments = explode_text_fragments(fragments)
            pattern = "|".join(re.escape(key) for key in self.selected_entries)
            for match in re.finditer(pattern, line_text):
                for i in range(match.start(), match.end()):
                    style, text, *_ = fragments[i]
                    fragments[i] = (style + self.match_fragment, text)
        return Transformation(fragments)
def get_current_key(doc, keys, get_start_end=False, get_expanded=False):
    """
    Get the key of the bibtex entry currently under the cursor.

    When the current line is itself a key, the entry is collapsed;
    otherwise the key is extracted from the expanded BibTeX content.
    Optionally also return the (start, end) character positions of the
    entry and/or whether it is expanded.
    """
    cursor = doc.cursor_position
    if doc.current_line in keys:
        # Collapsed entry: the whole line is the key.
        expanded = False
        key = doc.current_line
        if get_start_end:
            start_pos = cursor + doc.get_start_of_line_position()
            end_pos = cursor + doc.get_end_of_line_position()
    else:
        # Expanded entry: locate the '@' opening the entry and read the
        # key between '{' and the first ','.
        expanded = True
        start_pos = cursor
        if doc.current_char != '@':
            start_pos += doc.find_backwards('@')
        key_start = doc.text.find('{', start_pos)
        key_end = doc.text.find(',', start_pos)
        key = doc.text[key_start+1:key_end].strip()
        if get_start_end:
            end_pos = u.find_closing_bracket(doc.text, start_pos) + 2
    extras = []
    if get_start_end:
        extras.append((start_pos, end_pos))
    if get_expanded:
        extras.append(expanded)
    if not extras:
        return key
    return (key, *extras)
def browse():
    """
    A browser for the bibmanager database.

    Runs a full-screen prompt_toolkit application that lists all entry
    keys.  Entries can be expanded/collapsed, selected, saved to a file
    or to screen output, opened as PDF (fetching from ADS if needed), or
    opened in the web browser (see ``help_message`` for key bindings).
    Entries selected for screen output are pretty-printed after the
    application exits.
    """
    # Content of the text buffer:
    bibs = bm.load()
    keys = [bib.key for bib in bibs]
    # Two renderings of the database: keys only, and full BibTeX content.
    compact_text = "\n".join(keys)
    expanded_text = "\n\n".join(bib.content for bib in bibs)
    # A list object, since I want this to be a global variable
    selected_content = [None]
    # Pygments style from the user config, merged with custom UI styles:
    lex_style = style_from_pygments_cls(
        pygments.styles.get_style_by_name(cm.get('style')))
    custom_style = Style.from_dict({
        "status": "reverse",
        "status.position": "#aaaa00",
        "status.key": "#ffaa00",
        "shadow": "bg:#440044",
        "not-searching": "#888888",
    })
    style = merge_styles([lex_style, custom_style])
    # Status-bar content providers (re-evaluated on every render):
    def get_menubar_text():
        return [
            ("class:status", " ("),
            ("class:status.key", "enter"),
            ("class:status", ")select entry ("),
            ("class:status.key", "e"),
            ("class:status", ")xpand entry ("),
            ("class:status.key", "f"),
            ("class:status", ")ind ("),
            ("class:status.key", "s"),
            ("class:status", ")ave ("),
            ("class:status.key", "h"),
            ("class:status", ")elp ("),
            ("class:status.key", "q"),
            ("class:status", ")uit"),
        ]
    def get_menubar_right_text():
        """Get index of entry under cursor."""
        key = get_current_key(text_field.buffer.document, keys)
        return f" {keys.index(key) + 1} "
    def get_infobar_text():
        """Get author-year-title of entry under cursor."""
        key = get_current_key(text_field.buffer.document, keys)
        bib = bibs[keys.index(key)]
        year = '' if bib.year is None else bib.year
        title = 'NO_TITLE' if bib.title is None else bib.title
        return f"{bib.get_authors('ushort')}{year}: {title}"
    # Widgets: search bar (with key completion) and the main text area.
    search_buffer = Buffer(
        completer=WordCompleter(keys),
        complete_while_typing=False,
        multiline=False)
    search_field = SearchToolbar(
        search_buffer=search_buffer,
        forward_search_prompt = "Search: ",
        backward_search_prompt = "Search backward: ",
        ignore_case=False)
    text_field = TextArea(
        text=compact_text,
        lexer=PygmentsLexer(BibTeXLexer),
        scrollbar=True,
        line_numbers=False,
        read_only=True,
        search_field=search_field,
        input_processors=[HighlightEntryProcessor()],
    )
    text_field.buffer.name = 'text_area_buffer'
    text_field.is_expanded = False
    # Shortcut to HighlightEntryProcessor:
    for processor in text_field.control.input_processors:
        if processor.__class__.__name__ == 'HighlightEntryProcessor':
            text_field.bm_processor = processor
    # Do not highlight searched text:
    sp = text_field.control.default_input_processors[0]
    sp._classname = ' '
    sp._classname_current = ' '
    # Layout: menu bar on top, text area, search toolbar, info bar below.
    menu_bar = VSplit([
        Window(
            FormattedTextControl(get_menubar_text),
            style="class:status"),
        Window(
            FormattedTextControl(get_menubar_right_text),
            style="class:status.right",
            width=9,
            align=WindowAlign.RIGHT),
        ],
        height=1,
    )
    info_bar = Window(
        content=FormattedTextControl(get_infobar_text),
        height=D.exact(1),
        style="class:status",
    )
    body = HSplit([
        menu_bar,
        text_field,
        search_field,
        info_bar,
    ])
    root_container = FloatContainer(
        content=body,
        floats=[
            Float(
                xcursor=True,
                ycursor=True,
                content=CompletionsMenu(max_height=16, scroll_offset=1),
            ),
        ],
    )
    # Key bindings:
    bindings = KeyBindings()
    # Filters: most bindings apply only while the text area is focused;
    # control-c applies only while a dialog is focused.
    text_focus = Condition(
        lambda: get_app().layout.current_window == text_field.window)
    dialog_focus = Condition(
        lambda: hasattr(get_app().layout.current_window, 'dialog'))
    @bindings.add("q", filter=text_focus)
    def _quit(event):
        event.app.exit()
    # Navigation:
    @bindings.add("g", filter=text_focus)
    def _go_to_first_line(event):
        event.current_buffer.cursor_position = 0
    @bindings.add("G", filter=text_focus)
    def _go_to_last_line(event) -> None:
        event.current_buffer.cursor_position = len(event.current_buffer.text)
    @bindings.add("d", filter=text_focus)
    def _scroll_down(event):
        scroll_half_page_down(event)
    @bindings.add("u", filter=text_focus)
    def _scroll_up(event):
        scroll_half_page_up(event)
    @bindings.add("n", filter=text_focus)
    def _find_next(event):
        search_state = event.app.current_search_state
        event.current_buffer.apply_search(
            search_state, include_current_position=False, count=event.arg)
    @bindings.add("N", filter=text_focus)
    def _find_previous(event):
        # ~search_state inverts the search direction.
        search_state = event.app.current_search_state
        event.current_buffer.apply_search(
            ~search_state, include_current_position=False, count=event.arg)
    @bindings.add("h", filter=text_focus)
    def _show_help(event):
        show_message("Shortcuts", help_message)
    @bindings.add("f", filter=text_focus)
    def _start_search(event):
        search.start_search(direction=search.SearchDirection.FORWARD)
    @bindings.add("b", filter=text_focus)
    def _open_in_browser(event):
        key = get_current_key(event.current_buffer.document, keys)
        bib = bm.find(key=key, bibs=bibs)
        if bib.adsurl is not None:
            webbrowser.open(bib.adsurl, new=2)
        else:
            show_message("Message", f"Entry '{key}' does not have an ADS url.")
    @bindings.add("c-c", filter=dialog_focus)
    def _close_dialog(event):
        # Cancel the focused dialog by resolving its future with None.
        get_app().layout.current_window.dialog.future.set_result(None)
    @bindings.add("s", filter=text_focus)
    def _save_selected_to_file(event):
        selected = text_field.bm_processor.selected_entries
        if len(selected) == 0:
            show_message("Message", "Nothing to save.")
            return
        async def coroutine():
            dialog = TextInputDialog(
                title="Save to File",
                label_text="\nEnter a file path or leave blank to quit "
                    "and print to screen:\n(press Control-c to cancel)\n",
                completer=PathCompleter(),
            )
            path = await show_dialog_as_float(dialog)
            content = '\n\n'.join(
                bibs[keys.index(key)].content for key in selected)
            if path == "":
                selected_content[0] = content
                # The program termination is in TextInputDialog() since I
                # need to close this coroutine first.
                return
            if path is not None:
                try:
                    with open(path, "w") as f:
                        f.write(content)
                except IOError as e:
                    show_message("Error", str(e))
        ensure_future(coroutine())
    @bindings.add("enter", filter=text_focus)
    def _toggle_selected_entry(event):
        "Select/deselect entry pointed by the cursor."
        key = get_current_key(event.current_buffer.document, keys)
        text_field.bm_processor.toggle_selected_entry(key)
    @bindings.add("e", filter=text_focus)
    def _expand_collapse_entry(event):
        "Expand/collapse current entry."
        key, start_end, is_expanded = get_current_key(
            event.current_buffer.document, keys,
            get_start_end=True, get_expanded=True)
        bib = bm.find(key=key, bibs=bibs)
        # Replace the entry's text span with its key (collapse) or with
        # its full content (expand), via the clipboard.
        if is_expanded:
            event.app.clipboard.set_text(bib.key)
        else:
            event.app.clipboard.set_text(bib.content + '\n')
        text_field.read_only = False
        event.current_buffer.cursor_position = start_end[0]
        event.current_buffer.delete(count=start_end[1] - start_end[0])
        event.current_buffer.paste_clipboard_data(
            event.app.clipboard.get_data(), count=event.arg,
            paste_mode=PasteMode.VI_BEFORE)
        text_field.read_only = True
        if is_expanded:
            event.current_buffer.cursor_position = start_end[0]
    @bindings.add("E", filter=text_focus)
    def _expand_collapse_all(event):
        "Expand/collapse all entries."
        buffer = event.current_buffer
        key = get_current_key(buffer.document, keys)
        if text_field.is_expanded:
            text_field.text = compact_text
        else:
            text_field.text = expanded_text
        # Keep the cursor on the same entry after the swap.
        buffer.cursor_position = buffer.text.index(key)
        text_field.is_expanded = not text_field.is_expanded
    @bindings.add("o", filter=text_focus)
    def _open_pdf(event):
        buffer = event.current_buffer
        key = get_current_key(buffer.document, keys)
        bib = bm.find(key=key, bibs=bibs)
        has_pdf = bib.pdf is not None
        has_bibcode = bib.bibcode is not None
        is_missing = has_pdf and not os.path.exists(f'{u.BM_PDF()}{bib.pdf}')
        if not has_pdf and not has_bibcode:
            show_message("Message",
                f"BibTeX entry '{key}' does not have a PDF.")
            return
        if has_pdf and not is_missing:
            pm.open(key=key)
            #except Exception as e:
            #    show_message("Message", textwrap.fill(str(e), width=70))
            return
        if has_pdf and is_missing and not has_bibcode:
            show_message("Message",
                f"BibTeX entry has a PDF file: {bib.pdf}, but the file "
                "could not be found.")
            return
        # Need to fetch before opening:
        async def coroutine():
            dialog = MessageDialog(
                "PDF file not found",
                "Fetch from ADS?\n(might take a few seconds ...)",
                asking=True)
            fetch = await show_dialog_as_float(dialog)
            if fetch:
                # Capture pm.fetch()'s stdout to show it in a dialog.
                with io.StringIO() as buf, redirect_stdout(buf):
                    fetched = pm.fetch(bib.bibcode, replace=True)
                    fetch_output = buf.getvalue()
                if fetched is None:
                    show_message("PDF fetch failed", fetch_output)
                else:
                    show_message("PDF fetch succeeded.", fetch_output)
                    pm.open(key=key)
        ensure_future(coroutine())
    # Full-screen application:
    application = Application(
        layout=Layout(root_container, focused_element=text_field),
        key_bindings=bindings,
        enable_page_navigation_bindings=True,
        style=style,
        full_screen=True,
    )
    application.run()
    # Print entries that were selected for screen output ('s' + blank path):
    if selected_content[0] is not None:
        tokens = list(pygments.lex(selected_content[0], lexer=BibTeXLexer()))
        print_formatted_text(
            PygmentsTokens(tokens),
            end="",
            style=lex_style,
            #output=create_output(sys.stdout),
        )
|
# Demo of integer vs. float division and operator precedence.
largura = 17    # width (int)
altura = 12.0   # height (float); renamed from "Altura" to follow snake_case
print(largura // 2)   # floor division -> 8
print(largura/2.0)    # true division -> 8.5
print(altura/3)       # float division -> 4.0
print(1 + 2 * 5)      # multiplication binds tighter -> 11
|
""" Module for downloading ARM data. """
import argparse
import json
import sys
import os
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
def download_data(username, token, datastream,
                  startdate, enddate, time=None, output=None):
    """
    This tool will help users utilize the ARM Live Data Webservice to download
    ARM data.
    Parameters
    ----------
    username : str
        The username to use for logging into the ADC archive.
    token : str
        The access token for accessing the ADC archive.
    datastream : str
        The name of the datastream to acquire.
    startdate : str
        The start date of the data to acquire. Format is YYYY-MM-DD.
    enddate : str
        The end date of the data to acquire. Format is YYYY-MM-DD.
    time: str or None
        The specific time. Format is HHMMSS. Set to None to download all files
        in the given date interval.
    output : str
        The output directory for the data. Set to None to make a folder in the
        current working directory with the same name as *datastream* to place
        the files in.
    Notes
    -----
    This programmatic interface allows users to query and automate
    machine-to-machine downloads of ARM data. This tool uses a REST URL and
    specific parameters (saveData, query), user ID and access token, a
    datastream name, a start date, and an end date, and data files matching
    the criteria will be returned to the user and downloaded.
    By using this web service, users can setup cron jobs and automatically
    download data from /data/archive into their workspace. This will also
    eliminate the manual step of following a link in an email to download data.
    All other data files, which are not on the spinning
    disk (on HPSS), will have to go through the regular ordering process.
    More information about this REST API and tools can be found on `ARM Live
    <https://adc.arm.gov/armlive/#scripts>`_.
    To login/register for an access token click `here
    <https://adc.arm.gov/armlive/livedata/home>`_.
    Author: Michael Giansiracusa
    Email: giansiracumt@ornl.gov
    Web Tools Contact: Ranjeet Devarakonda zzr@ornl.gov
    Examples
    --------
    This code will download the netCDF files from the sgpmetE13.b1 datastream
    and place them in a directory named sgpmetE13.b1. The data from 14 Jan to
    20 Jan 2017 will be downloaded. Replace *userName* and *XXXXXXXXXXXXXXXX*
    with your username and token for ARM Data Discovery. See the Notes for
    information on how to obtain a username and token.
    .. code-block:: python
        act.discovery.download_data('userName','XXXXXXXXXXXXXXXX', 'sgpmetE13.b1',
                                    '2017-01-14', '2017-01-20')
    """
    # default start and end are empty
    start, end = '', ''
    # start and end strings for query_url are constructed
    # if the arguments were provided
    if startdate:
        start = "&start={}".format(startdate)
    if enddate:
        end = "&end={}".format(enddate)
    # build the url to query the web service using the arguments provided
    query_url = ('https://adc.arm.gov/armlive/livedata/query?' +
                 'user={0}&ds={1}{2}{3}&wt=json').format(
        ':'.join([username, token]), datastream, start, end)
    # get url response, read the body of the message,
    # and decode from bytes type to utf-8 string
    response_body = urlopen(query_url).read().decode("utf-8")
    # if the response is an html doc, then there was an error with the user
    # (slice [1:14] skips the leading '<' of "<!DOCTYPE html")
    if response_body[1:14] == "!DOCTYPE html":
        raise ConnectionRefusedError("Error with user. Check username or token.")
    # parse into json object
    response_body_json = json.loads(response_body)
    # construct output directory
    if output:
        # output files to directory specified
        output_dir = os.path.join(output)
    else:
        # if no folder given, add datastream folder
        # to current working dir to prevent file mix-up
        output_dir = os.path.join(os.getcwd(), datastream)
    # not testing, response is successful and files were returned
    num_files = len(response_body_json["files"])
    if response_body_json["status"] == "success" and num_files > 0:
        for fname in response_body_json['files']:
            # NOTE(review): 'time' filters by substring match on the file
            # name — assumes HHMMSS appears verbatim in fname; confirm.
            if time is not None:
                if time not in fname:
                    continue
            print("[DOWNLOADING] {}".format(fname))
            # construct link to web service saveData function
            save_data_url = ("https://adc.arm.gov/armlive/livedata/" +
                             "saveData?user={0}&file={1}").format(
                ':'.join([username, token]), fname)
            output_file = os.path.join(output_dir, fname)
            # make directory if it doesn't exist
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            # create file and write bytes to file
            with open(output_file, 'wb') as open_bytes_file:
                open_bytes_file.write(urlopen(save_data_url).read())
    else:
        print("No files returned or url status error.\n"
              "Check datastream name, start, and end date.")
|
import project.functions as func
class Calculator:
    """A small accumulator-style calculator.

    Keeps a running value in ``self.memory`` and updates it by applying
    two-argument functions from ``project.functions``.
    """

    def __init__(self):
        """Default calculator memory is set to 0."""
        self.memory = 0

    def do_function(self, number, function):
        """Apply ``function(memory, number)`` and store the result in memory.

        On a TypeError (e.g. a non-numeric argument) a message is printed,
        memory is left unchanged, and None is returned.
        """
        try:
            self.memory = function(self.memory, number)
        except TypeError:
            print('Oops! That was no valid number. Try again...')
        else:
            return self.memory

    def add(self, addend):
        """Add *addend* to the number in calculator memory."""
        return self.do_function(addend, func.addition)

    def subtract(self, subtrahend):
        """Subtract *subtrahend* from the number in calculator memory."""
        return self.do_function(subtrahend, func.subtraction)

    def multiply(self, multiplier):
        """Multiply the number in calculator memory by *multiplier*."""
        return self.do_function(multiplier, func.multiplication)

    def nthroot(self, n):
        """Take the *n*-th root of the number in calculator memory."""
        return self.do_function(n, func.take_root)

    def divide(self, divisor):
        """Divide the calculator memory by *divisor* (guards against zero)."""
        if divisor == 0:
            return 'Cannot divide by 0'
        return self.do_function(divisor, func.division)

    def reset(self):
        """Reset the calculator memory to 0."""
        self.memory = 0
        return f'Calculator memory is set to {self.memory}'
|
"""
DataFrame 모듈
pandas DataFrame을 이용한 Data Wrapper, Query
"""
import functools
import pandas as pd
import numpy as np
import warnings
import sqlalchemy
from collections import defaultdict
from datetime import datetime
from typing import List, Callable, Generator
from daqm.data.data import Data
from daqm.data.columns import Column, OperatedColumn, ConstantColumn, SimpleColumn, FunctionalColumn
from daqm.data.query import Query
def convert_multiindex_columns(
    df: pd.DataFrame,
    delimiter: str = "$"
) -> List[str]:
  """
  Flatten the (possibly multi-index) column labels of *df* into strings.

  Tuple labels are joined with *delimiter*; plain string labels are kept
  unchanged.

  :example: ("ABC", "DEF") -> "ABC$DEF"
  :param pandas.DataFrame df:
    DataFrame whose column labels are converted
  :param str delimiter:
    separator used when joining tuple labels
  :return: list of converted column names
  :rtype: list
  """
  return [
      label if isinstance(label, str) else delimiter.join(label)
      for label in df.columns
  ]
class DataFrameQuery:
  """
  Class that applies a Query to a pandas DataFrame.

  :param pandas.DataFrame df:
    pandas DataFrame holding the data to query
  :param Query query:
    Query to apply
  """
  @classmethod
  def query(
      cls,
      df: pd.DataFrame,
      query: Query
  ) -> pd.DataFrame:
    """
    Apply the query and return the result as a DataFrame.

    :param pandas.DataFrame df:
      pandas DataFrame holding the data to query
    :param Query query:
      Query to apply
    :return: pandas DataFrame with the query applied
    :rtype: pandas.DataFrame
    """
    df = pd.DataFrame(df)
    return cls(df, query).df
  def __init__(self, df: pd.DataFrame, query: Query):
    """
    Initialize self. See help(type(self)) for accurate signature.
    """
    self.query = query
    self.df = df
    self._parse_query()
  def _parse_query(self):
    """
    Parse the query: joins, then where clauses, then either the
    group-by path or the plain select path.
    """
    self._parse_join()
    self._parse_where()
    if self.query.groupby_list:
      self._parse_groupby()
    else:
      self._parse_normal()
  @staticmethod
  def _check_and_add_column(
      df: pd.DataFrame,
      col: Column,
      child_only: bool = False
  ):
    """
    Compute the values needed to evaluate column *col* and add them to *df*.

    Child columns are materialized recursively first; with
    ``child_only=True`` only the children are added (used for group-by
    aggregations, where the parent is evaluated by ``agg``).
    """
    for child_col in col.children:
      DataFrameQuery._check_and_add_column(df, child_col)
    if child_only:
      return
    if isinstance(col, SimpleColumn):
      if col.target_column_name not in df.columns:
        raise ValueError(f"Column {col.target_column_name} not in DataFrame.")
      df.loc[:, col.name] = df[col.target_column_name]
    elif isinstance(col, ConstantColumn):
      df[col.name] = col.value
    elif isinstance(col, FunctionalColumn):
      # NOTE QueryFunction Marker
      # If add new function in QueryFunction, must add it's implementation here.
      res_col = None
      if col.func == "sum":
        res_col = df[col.columns[0].name].sum()
      elif col.func == "min":
        res_col = df[col.columns[0].name].min()
      elif col.func == "max":
        res_col = df[col.columns[0].name].max()
      elif col.func == "avg":
        res_col = df[col.columns[0].name].mean()
      elif col.func == "stddev":
        res_col = df[col.columns[0].name].std()
      elif col.func == "count":
        res_col = df[col.columns[0].name].count()
      elif col.func == "unique":
        res_col = df[col.columns[0].name].unique()
        if not col.options["to_string"]:
          warnings.warn("DataFrameData doesn't support array. It will be changed to comma separated string")
        if isinstance(res_col, np.ndarray):
          res_col = col.options["string_delimiter"].join(map(str, sorted(res_col)))
          res_col = pd.Series(res_col, index=df.index)
        else:
          res_col = res_col.apply(lambda x: col.options["string_delimiter"].join(map(str, sorted(x))))
      elif col.func == "nunique":
        res_col = df[col.columns[0].name].nunique()
      elif col.func == "all":
        res_col = df[col.columns[0].name].values
        if isinstance(res_col, np.ndarray):
          res_col = col.options["string_delimiter"].join(map(str, res_col))
          res_col = pd.Series(res_col, index=df.index)
        else:
          res_col = res_col.apply(lambda x: col.options["string_delimiter"].join(map(str, x)))
      elif col.func in ("percentile_cont", "percentile_disc"):
        res_col = df[col.columns[0].name].quantile(
            q=col.options["q"],
            interpolation="linear" if col.func == "percentile_cont" else "nearest"
        )
      elif col.func == "abs":
        res_col = df[col.columns[0].name].abs()
      elif col.func == "round":
        res_col = df[col.columns[0].name].round(col.options["decimals"])
      elif col.func == "ceil":
        res_col = np.ceil(df[col.columns[0].name])
      elif col.func == "trunc":
        res_col = np.trunc(df[col.columns[0].name])
      elif col.func == "floor":
        res_col = np.floor(df[col.columns[0].name])
      elif col.func == "rank":
        if col.columns[1] is None:
          res_col = df[col.columns[0].name].rank()
        else:
          # Partitioned rank (rank within each group of columns[1]).
          res_col = df.groupby(col.columns[1].name)[col.columns[0].name].rank()
      elif col.func == "date_diff":
        # Difference in days between two datetime columns.
        res_col = (df[col.columns[0].name] - df[col.columns[1].name]) / np.timedelta64(1, "D")
      elif col.func == "date_year":
        res_col = df[col.columns[0].name].dt.year
      elif col.func == "date":
        year_col = col.columns[0].name
        month_col = col.columns[1].name
        day_col = col.columns[2].name
        res_col = df.apply(lambda x: datetime(
            int(x[year_col]),
            int(x[month_col]),
            int(x[day_col])), axis=1)
      elif col.func == "date_delta":
        value_col = col.columns[0]
        if isinstance(value_col, ConstantColumn):
          res_col = pd.to_timedelta(value_col.value, unit="days")
        else:
          res_col = pd.to_timedelta(df[value_col.name], unit="days")
      elif col.func == "case":
        # columns alternate (condition, value, condition, value, ...);
        # a trailing odd column is the ELSE value.
        res_col = pd.Series(None, index=df.index)
        for idx in range(0, len(col.columns), 2):
          if idx == len(col.columns) - 1:
            # It's else col
            value_col = col.columns[idx]
            if isinstance(value_col, ConstantColumn):
              each_res_col = pd.Series(value_col.value, index=df.index)
            else:
              each_res_col = df[value_col.name]
          else:
            condition_col = col.columns[idx]
            value_col = col.columns[idx + 1]
            def apply_condition(x):
              """Apply the condition to one row."""
              if x[condition_col.name]:
                if isinstance(value_col, ConstantColumn):
                  return value_col.value
                else:
                  return x[value_col.name]
              else:
                return None
            each_res_col = df.apply(apply_condition, axis=1)
          if each_res_col.empty:
            each_res_col = pd.Series(None, dtype="float64")
          # fillna implements first-match-wins across the branches.
          res_col = res_col.fillna(each_res_col)
      elif col.func == "coalesce":
        res_col = pd.Series(None, index=df.index)
        for each_col in col.columns:
          if isinstance(each_col, ConstantColumn):
            each_res_col = pd.Series(each_col.value, index=df.index)
          else:
            each_res_col = df[each_col.name]
          res_col = res_col.fillna(each_res_col)
      elif col.func == "isnull":
        res_col = pd.isnull(df[col.columns[0].name])
      elif col.func == "notnull":
        res_col = pd.notnull(df[col.columns[0].name])
      elif col.func in ("in", "notin"):
        # NOTE(review): `else col.name` looks like it should be
        # `in_col.name` (the member column, not the IN expression
        # itself) — confirm against the SQL backend's behavior.
        in_cols = [
          in_col.value if isinstance(in_col, ConstantColumn) else col.name
          for in_col in col.columns[1:]
        ]
        res_col = df[col.columns[0].name].isin(in_cols)
        if col.func == "notin":
          res_col = ~res_col
      elif col.func == "greatest":
        # NOTE(review): x.isna() returns a Series, so `is False` is
        # always False and the dropna() branch always runs; max()/min()
        # skip NaN anyway, so the result is row-wise max over non-nulls.
        col_list = [each_col.name for each_col in col.columns]
        res_col = df[col_list].apply(
            lambda x: x.max()
            if x.isna() is False
            else x.dropna().max(),
            axis=1
        )
      elif col.func == "least":
        col_list = [each_col.name for each_col in col.columns]
        res_col = df[col_list].apply(
            lambda x: x.min()
            if x.isna() is False
            else x.dropna().min(),
            axis=1
        )
      elif col.func == "and":
        condition_df_list = [df[each_col.name] for each_col in col.columns]
        res_col = functools.reduce(np.logical_and, condition_df_list)
      elif col.func == "or":
        condition_df_list = [df[each_col.name] for each_col in col.columns]
        res_col = functools.reduce(np.logical_or, condition_df_list)
      elif col.func == "cast":
        # TODO: numeric or decimal: add user-defined precision types
        # TODO: add interval (switch to_timedelta's unit parameter by the input unit)
        # SQL type name -> chain of pandas dtypes to cast through
        # (e.g. via "float" first so strings like "1.0" reach Int64).
        convert_map_dict = {
            "bigint": ["float", "Int64"],
            "int": ["float", "Int32"],
            "smallint": ["float", "Int16"],
            "boolean": ["boolean"],
            "double precision": ["float64"],
            "float": ["float64"],
            "real": ["float32"],
            "date": ["datetime64[ns]"],
            "datetime": ["datetime64[ns]"],
            "time": ["datetime64[ns]"],
            "char": [str],
            "varchar": [str],
            "text": [str]
        }
        target_type_list = convert_map_dict.get(col.options["target_type"])
        res_col = df[col.columns[0].name]
        for target_type in target_type_list:
          res_col = res_col.astype(target_type)
        if col.options["target_type"] == "date":
          res_col = res_col.dt.date
        elif col.options["target_type"] == "time":
          res_col = res_col.dt.time
      else:
        raise NotImplementedError(f"Function {col.func} not implemented for DataFrame.")
      df.loc[:, col.name] = res_col
    elif isinstance(col, OperatedColumn):
      res_col = None
      left_col = col.l_column.value if col.l_column.is_constant() else df[col.l_column.name]
      right_col = col.r_column.value if col.r_column.is_constant() else df[col.r_column.name]
      # NOTE ColumnOperator Marker
      # If add new function in QueryFunction, must add it's implementation here.
      if col.operator == "add":
        res_col = left_col + right_col
      elif col.operator == "sub":
        res_col = left_col - right_col
      elif col.operator == "mul":
        res_col = left_col * right_col
      elif col.operator == "div":
        res_col = left_col / right_col
      elif col.operator == "floordiv":
        res_col = left_col // right_col
      elif col.operator == "lt":
        res_col = left_col < right_col
      elif col.operator == "le":
        res_col = left_col <= right_col
      elif col.operator == "eq":
        res_col = left_col == right_col
      elif col.operator == "ne":
        res_col = left_col != right_col
      elif col.operator == "gt":
        res_col = left_col > right_col
      elif col.operator == "ge":
        res_col = left_col >= right_col
      elif col.operator in ("like", "ilike", "notlike", "notilike"):
        # Translate the SQL LIKE pattern into an anchored regex:
        # '%%' -> any run of chars, '_' -> any single char.
        right_col = "^" + right_col + "$"
        right_col = right_col.replace("%%", r"(.|\s)*")
        right_col = right_col.replace("_", r"(.|\s)")
        case = col.operator in ("like", "notlike")
        if col.operator in ("notlike", "notilike"):
          right_col = rf"(?!{right_col})"
        res_col = left_col.str.match(right_col, case=case)
      df.loc[:, col.name] = res_col
  def _parse_where(self):
    """
    Parse where clauses: evaluate each condition column and keep only
    the rows where it is truthy.
    """
    for col in self.query.where_list:
      self._check_and_add_column(self.df, col)
      self.df = self.df[self.df[col.name]]
  def _parse_join(self):
    """
    Parse join clauses and merge the joined data into self.df.
    """
    for data, left_on, right_on, how, suffixes in self.query.join_list:
      right_df = data.to_df()
      left_on_names = []
      right_on_names = []
      for col in left_on:
        self._check_and_add_column(self.df, col)
        left_on_names.append(col.name)
      for col in right_on:
        self._check_and_add_column(right_df, col)
        right_on_names.append(col.name)
      self.df = pd.merge(
          self.df,
          right_df,
          left_on=left_on_names,
          right_on=right_on_names,
          how=how,
          suffixes=suffixes
      )
  def _is_array_agg(self, agg_type):
    """
    Whether the given agg_type returns an array.
    """
    return agg_type in ["unique", "all"]
  def _parse_groupby_agg(self, col: Column) -> (str, str, str, str):
    """
    Parse one select/order-by column of a group-by query.

    :return:
      column name to aggregate, aggregation type (name or callable),
      column name after aggregation ("<col>$<agg>"), and the final
      column name to rename it to
    """
    # TODO
    # There are functions or operations that are not allowed in select statement with groupby.
    # Need to raise Exception.
    is_functional = isinstance(col, FunctionalColumn)
    self._check_and_add_column(self.df, col, child_only=is_functional)
    agg_col_name = col.name
    agg_type = agg_type_col_name = "first"
    rename_key = None
    rename_value = None
    if is_functional:
      # NOTE QueryFunction Marker
      # If add new function in QueryFunction, must add it's implementation here.
      # Here is for group by agg functions. Might not be needed.
      agg_type = agg_type_col_name = col.func
      if agg_type == "avg":
        agg_type = agg_type_col_name = "mean"
      elif agg_type == "nunique":
        agg_type = pd.Series.nunique
      elif self._is_array_agg(agg_type):
        if not col.options["to_string"]:
          warnings.warn("DataFrameData doesn't support array. It will be changed to comma separated string")
        if agg_type == "all":
          agg_type = list
          agg_type_col_name = "list"
      agg_col_name = col.columns[0].name
      rename_key = "$".join((agg_col_name, agg_type_col_name))
      rename_value = col.name
    return (agg_col_name, agg_type, rename_key, rename_value)
  def _parse_groupby(self):
    """
    Parse a query that has a group-by clause.
    """
    groupby_column_names = []
    for col in self.query.groupby_list:
      self._check_and_add_column(self.df, col)
      groupby_column_names.append(col.name)
    select_column_names = []
    agg_col_dict = defaultdict(set)
    rename_dict = defaultdict(list)
    array_cols = []  # must change array to string if unique in agg_type
    for col in self.query.select_list:
      if col.name in select_column_names:
        raise ValueError("Duplicate column in select clause. Use label('new_name') to avoid ambiguity.")
      select_column_names.append(col.name)
      if col in self.query.groupby_set:
        continue
      agg_col_name, agg_type, rename_key, rename_value = self._parse_groupby_agg(col)
      agg_col_dict[agg_col_name].add(agg_type)
      if rename_key:
        rename_dict[rename_key].append(rename_value)
      if self._is_array_agg(agg_type):
        array_cols.append((rename_key, col.options["string_delimiter"]))
    orderby_column_names = []
    orderby_ascendings = []
    for col in self.query.orderby_list:
      orderby_column_names.append(col.name)
      orderby_ascendings.append(not col.is_desc())
      if col in self.query.groupby_set:
        continue
      agg_col_name, agg_type, rename_key, rename_value = self._parse_groupby_agg(col)
      agg_col_dict[agg_col_name].add(agg_type)
      if rename_key:
        rename_dict[rename_key].append(rename_value)
      if self._is_array_agg(agg_type):
        array_cols.append((rename_key, col.options["string_delimiter"]))
    self.df = self.df.groupby(groupby_column_names).agg(agg_col_dict)
    # agg() produces multi-index columns ("col", "agg") -> "col$agg".
    self.df.columns = convert_multiindex_columns(self.df)
    for col, delimiter in array_cols:
      self.df.loc[:, col] = self.df[col].apply(lambda x: delimiter.join(map(str, sorted(x))))
    for rename_key in rename_dict:
      for new_col_name in rename_dict[rename_key]:
        self.df.loc[:, new_col_name] = self.df[rename_key]
    if len(self.df) == 0:
      # when data frame is empty, reset_index() does't reset groupby columns
      # NOTE(review): reset_index() is not in-place and its result is
      # discarded here; the group-by columns are re-added as empty
      # Series below instead — confirm this is intended.
      self.df.reset_index(drop=True)
      for col in groupby_column_names:
        self.df[col] = pd.Series(None, self.df.index)
    else:
      self.df = self.df.reset_index()
    if orderby_column_names:
      self.df = self.df.sort_values(orderby_column_names, ascending=orderby_ascendings)
    if select_column_names:
      self.df = self.df[select_column_names]
  def _parse_normal(self):
    """
    Parse a plain query without group-by.
    """
    select_column_names = []
    for col in self.query.select_list:
      self._check_and_add_column(self.df, col)
      if col.name in select_column_names:
        raise ValueError("Duplicate column in select clause. Use label(\"new_name\") to avoid ambiguity.")
      select_column_names.append(col.name)
    orderby_column_names = []
    orderby_ascendings = []
    for col in self.query.orderby_list:
      self._check_and_add_column(self.df, col)
      orderby_column_names.append(col.name)
      orderby_ascendings.append(not col.is_desc())
    if orderby_column_names:
      self.df = self.df.sort_values(orderby_column_names, ascending=orderby_ascendings)
    if select_column_names:
      self.df = self.df[select_column_names]
    # For aggregate functions, drop_duplicates collapses the frame to a single row.
    if self.query.is_agg:
      self.df = self.df.drop_duplicates()
class DataFrameData(Data):
    """Data implementation backed by a :class:`pandas.DataFrame`.

    :param pandas.DataFrame data_df:
        The pandas DataFrame object to wrap.
    """

    @classmethod
    def from_sql(cls, conn: sqlalchemy.engine.Connection, sql: str) -> "DataFrameData":
        """Build a DataFrameData from the result of a DB query.

        :param conn:
            DB connection.
        :param str sql:
            SQL statement to execute.
        :return: query result wrapped as a DataFrameData.
        :rtype: DataFrameData
        """
        return cls(pd.read_sql(sql, conn))

    def __init__(self, data_df: pd.DataFrame):
        """Initialize self.  See help(type(self)) for accurate signature."""
        self.data_df = data_df
        super().__init__(self.data_df.columns)

    def apply_query(self, query: Query) -> "DataFrameData":
        """Apply a query to this data.

        :param Query query:
            Query object to apply.
        :return: new data with the query applied.
        :rtype: DataFrameData
        """
        # Work on a copy so this instance's frame is never mutated.
        return DataFrameData(DataFrameQuery.query(self.data_df.copy(), query))

    def to_df(self) -> pd.DataFrame:
        """Expose the wrapped data as a pandas DataFrame.

        :return: the data in pandas.DataFrame form.
        :rtype: pandas.DataFrame
        """
        return self.data_df

    def rename_column(self, rename_dict: dict) -> "DataFrameData":
        """Rename columns.

        :param dict rename_dict:
            Mapping of existing column name to new column name.
        :return: new data with columns renamed.
        :rtype: DataFrameData
        """
        renamed = self.data_df.rename(rename_dict, axis="columns")
        return DataFrameData(renamed)

    def copy(self) -> "DataFrameData":
        """Copy this data.

        :return: the copied data.
        :rtype: DataFrameData
        """
        return DataFrameData(pd.DataFrame(self.data_df))

    def apply_function(self,
                       func: Callable[[List], List],
                       columns: List[str] = None):
        """Apply a row-wise transformation function to the data.

        :param Callable func:
            Takes a single row as a list and returns the transformed
            row as a list.
        :param list columns:
            Column names to assign after the transformation; stringified
            positional indices are used when omitted.
        :return: new data with the function applied.
        :rtype: DataFrameData
        """
        transformed = self.data_df.apply(lambda row: pd.Series(func(row)), axis=1)
        if columns is None:
            transformed.columns = list(map(str, range(len(transformed.columns))))
        else:
            transformed.columns = columns
        return DataFrameData(transformed)

    def iter(self) -> Generator[list, None, None]:
        """Iterate over the data one row at a time.

        :return: generator yielding each row as a list.
        :rtype: generator
        """
        for values in self.data_df.values:
            yield values.tolist()

    def count(self) -> int:
        """Number of rows in the data.

        :return: total row count.
        :rtype: int
        """
        return len(self.data_df)
|
def say_name():
    """Report the name of this module."""
    module_name = 'module_one'
    return module_name
|
import networkx.algorithms.tests.test_covering
import pytest
from graphscope.experimental.nx.utils.compat import import_as_graphscope_nx
# Re-expose networkx's covering-algorithm test module under graphscope-nx,
# wrapping it with the "graphscope_session" fixture decorator so the tests
# presumably run against a live graphscope session — confirm against
# import_as_graphscope_nx's contract.
import_as_graphscope_nx(networkx.algorithms.tests.test_covering,
                        decorators=pytest.mark.usefixtures("graphscope_session"))
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    # NOTE(review): this is a Python 2 South data migration (bare `print`
    # statements) — it must run under Python 2 and should not be ported.

    def forwards(self, orm):
        # One-off backfill: copies each user's follower/following redis sets
        # into the new `new_followers` / `new_following` structures, ordered
        # by (lowercased) username. Progress is checkpointed per-user in the
        # 'following_migration_completed_user_ids' redis set so the migration
        # can resume after an interruption; the set is deleted on completion.
        from drawquest.apps.drawquest_auth.models import User
        from drawquest.apps.drawquest_auth.details_models import UserDetails
        from canvas.redis_models import redis
        completed_user_ids = [int(e) for e in redis.smembers('following_migration_completed_user_ids')]
        try:
            # max() raises ValueError on an empty list (fresh run).
            highest_completed = max(completed_user_ids)
        except ValueError:
            highest_completed = 0
        all_users = User.objects.all()
        print 'Gathering all_usernames...'
        # id -> lowercased username, used as the sort key below. 'fff' is the
        # fallback for ids missing from the map (sorts after most names).
        all_usernames = dict((int(id_), username.lower()) for id_, username in all_users.values_list('id', 'username'))
        print 'Done'
        print 'Beginning all_users iteration...'
        for user in all_users.filter(id__gte=highest_completed):
            if user.id in completed_user_ids:
                print 'Skipping {}'.format(user.id)
                continue
            print user.username, '({})'.format(user.id)
            followers = user.redis.followers.smembers()
            ids = sorted(followers, key=lambda f: all_usernames.get(int(f), 'fff'))
            # Bump in reverse-sorted order so the final ordering matches the
            # sorted usernames — assumes bump() moves an entry to the front;
            # confirm against the redis model's bump() implementation.
            for f in reversed(ids):
                user.redis.new_followers.bump(f)
            # Clear any partial new_following state before rebuilding it.
            user.redis.new_following.zremrangebyrank(0, -1)
            following = user.redis.following.smembers()
            ids = sorted(following, key=lambda f: all_usernames.get(int(f), 'fff'))
            for f in reversed(ids):
                user.redis.new_following.bump(f)
            # Checkpoint: mark this user as migrated.
            redis.sadd('following_migration_completed_user_ids', user.id)
        # All users processed — drop the checkpoint set.
        redis.delete('following_migration_completed_user_ids')
        print

    def backwards(self, orm):
        "Write your backwards methods here."

    # South's frozen ORM model definitions (auto-generated; do not edit).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'canvas.apiapp': {
            'Meta': {'object_name': 'APIApp'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'canvas.apiauthtoken': {
            'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.APIApp']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'canvas.category': {
            'Meta': {'object_name': 'Category'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
            'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'canvas.comment': {
            'Meta': {'object_name': 'Comment'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'attribution_copy': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'attribution_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Category']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': u"orm['canvas.Comment']"}),
            'posted_on_quest_of_the_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
            'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
            'skip_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'star_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'ugq': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'uuid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'canvas.commentflag': {
            'Meta': {'object_name': 'CommentFlag'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': u"orm['canvas.Comment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': u"orm['auth.User']"})
        },
        u'canvas.commentmoderationlog': {
            'Meta': {'object_name': 'CommentModerationLog'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': u"orm['auth.User']"}),
            'visibility': ('django.db.models.fields.IntegerField', [], {})
        },
        u'canvas.commentpin': {
            'Meta': {'object_name': 'CommentPin'},
            'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'canvas.commentsticker': {
            'Meta': {'object_name': 'CommentSticker'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': u"orm['canvas.Comment']"}),
            'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        u'canvas.commentstickerlog': {
            'Meta': {'object_name': 'CommentStickerLog'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'canvas.content': {
            'Meta': {'object_name': 'Content'},
            'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': u"orm['canvas.Content']"}),
            'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
            'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': u"orm['canvas.Content']"}),
            'stroke_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'canvas.contenturlmapping': {
            'Meta': {'object_name': 'ContentUrlMapping'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'canvas.emailunsubscribe': {
            'Meta': {'object_name': 'EmailUnsubscribe'},
            'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'canvas.facebookinvite': {
            'Meta': {'object_name': 'FacebookInvite'},
            'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
            'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"})
        },
        u'canvas.facebookuser': {
            'Meta': {'object_name': 'FacebookUser'},
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invited_by': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['canvas.FacebookUser']", 'symmetrical': 'False', 'blank': 'True'}),
            'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        u'canvas.followcategory': {
            'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': u"orm['canvas.Category']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': u"orm['auth.User']"})
        },
        u'canvas.friendjoinednotificationreceipt': {
            'Meta': {'unique_together': "(('actor', 'recipient'),)", 'object_name': 'FriendJoinedNotificationReceipt'},
            'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"})
        },
        u'canvas.stashcontent': {
            'Meta': {'object_name': 'StashContent'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Content']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'canvas.userinfo': {
            'Meta': {'object_name': 'UserInfo'},
            'avatar': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Content']", 'null': 'True'}),
            'bio_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'enable_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'enable_timeline_posts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'follower_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'profile_image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['canvas.Comment']", 'null': 'True'}),
            'trust_changed': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'blank': 'True'}),
            'trusted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'canvas.usermoderationlog': {
            'Meta': {'object_name': 'UserModerationLog'},
            'action': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': u"orm['auth.User']"})
        },
        u'canvas.userwarning': {
            'Meta': {'object_name': 'UserWarning'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'custom_message': ('django.db.models.fields.TextField', [], {}),
            'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issued': ('canvas.util.UnixTimestampField', [], {}),
            'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': u"orm['auth.User']"}),
            'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
        },
        u'canvas.welcomeemailrecipient': {
            'Meta': {'object_name': 'WelcomeEmailRecipient'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['canvas']
    symmetrical = True
|
import sys
import os
import json
import importlib
import logging
import functools
import click
from botocore.config import Config as BotocoreConfig
from botocore.session import Session
from typing import Any, Optional, Dict, MutableMapping # noqa
from chalice import __version__ as chalice_version
from chalice.awsclient import TypedAWSClient
from chalice.app import Chalice # noqa
from chalice.config import Config
from chalice.config import DeployedResources # noqa
from chalice.package import create_app_packager
from chalice.package import AppPackager # noqa
from chalice.constants import DEFAULT_STAGE_NAME
from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME
from chalice.constants import DEFAULT_ENDPOINT_TYPE
from chalice.logs import LogRetriever
from chalice import local
from chalice.utils import UI # noqa
from chalice.utils import PipeReader # noqa
from chalice.deploy import deployer # noqa
from chalice.deploy import validate
from chalice.invoke import LambdaInvokeHandler
from chalice.invoke import LambdaInvoker
from chalice.invoke import LambdaResponseFormatter
# Shorthand aliases used in the `# type:` comments throughout this module.
OptStr = Optional[str]
OptInt = Optional[int]
def create_botocore_session(profile=None, debug=False,
                            connection_timeout=None,
                            read_timeout=None,
                            max_retries=None):
    # type: (OptStr, bool, OptInt, OptInt, OptInt) -> Session
    """Create a botocore Session tagged with the chalice user agent.

    When ``debug`` is on, the large-request-body log filter is installed
    so deployment-package uploads don't flood the debug logs.  Any of the
    timeout/retry arguments that are not None become the session's default
    client config.
    """
    session = Session(profile=profile)
    _add_chalice_user_agent(session)
    if debug:
        _inject_large_request_body_filter()
    overrides = {}  # type: Dict[str, Any]
    if connection_timeout is not None:
        overrides['connect_timeout'] = connection_timeout
    if read_timeout is not None:
        overrides['read_timeout'] = read_timeout
    if max_retries is not None:
        overrides['retries'] = {'max_attempts': max_retries}
    if overrides:
        session.set_default_client_config(BotocoreConfig(**overrides))
    return session
def _add_chalice_user_agent(session):
    # type: (Session) -> None
    """Rebrand the session's user agent as chalice.

    The original botocore "name/version" pair is preserved as the
    user-agent suffix so both identities appear in the final header.
    """
    original_agent = '%s/%s' % (session.user_agent_name,
                                session.user_agent_version)
    session.user_agent_name = 'aws-chalice'
    session.user_agent_version = chalice_version
    session.user_agent_extra = original_agent
def _inject_large_request_body_filter():
    # type: () -> None
    """Attach the body-suppressing filter to botocore's endpoint logger."""
    logging.getLogger('botocore.endpoint').addFilter(LargeRequestBodyFilter())
class NoSuchFunctionError(Exception):
    """The specified function could not be found."""

    def __init__(self, name):
        # type: (str) -> None
        # The base class is initialized without a message; callers inspect
        # ``name`` directly.
        super(NoSuchFunctionError, self).__init__()
        self.name = name
class UnknownConfigFileVersion(Exception):
    """Raised when config.json declares an unsupported version."""

    def __init__(self, version):
        # type: (str) -> None
        message = "Unknown version '%s' in config.json" % version
        super(UnknownConfigFileVersion, self).__init__(message)
class LargeRequestBodyFilter(logging.Filter):
    """Logging filter that redacts huge lambda request bodies.

    Records are never dropped; matching records have their last formatting
    argument (the request body) replaced with a placeholder.
    """

    def filter(self, record):
        # type: (Any) -> bool
        # Note: the proper type should be "logging.LogRecord", but
        # the typechecker complains about 'Invalid index type "int" for "dict"'
        # so we're using Any for now.
        if record.msg.startswith('Making request'):
            operation = record.args[0]
            if operation.name in ['UpdateFunctionCode', 'CreateFunction']:
                # When using the ZipFile argument (which is used in chalice),
                # the entire deployment package zip is sent as a base64 encoded
                # string, which would clutter the debug logs — drop it.
                record.args = (record.args[:-1] +
                               ('(... omitted from logs due to size ...)',))
        return True
class CLIFactory(object):
    """Builds the collaborators used by the chalice CLI commands.

    Centralizes construction of botocore sessions, deployers, config
    objects, log retrievers, invoke handlers, and the local dev server
    for a single project directory.
    """

    def __init__(self, project_dir, debug=False, profile=None, environ=None):
        # type: (str, bool, Optional[str], Optional[MutableMapping]) -> None
        self.project_dir = project_dir
        self.debug = debug
        self.profile = profile
        # Default to a snapshot of the process environment; a mapping can
        # be injected for tests.
        if environ is None:
            environ = dict(os.environ)
        self._environ = environ

    def create_botocore_session(self, connection_timeout=None,
                                read_timeout=None, max_retries=None):
        # type: (OptInt, OptInt, OptInt) -> Session
        # Delegates to the module-level factory with this CLI's
        # profile/debug settings baked in.
        return create_botocore_session(profile=self.profile,
                                       debug=self.debug,
                                       connection_timeout=connection_timeout,
                                       read_timeout=read_timeout,
                                       max_retries=max_retries)

    def create_default_deployer(self, session, config, ui):
        # type: (Session, Config, UI) -> deployer.Deployer
        return deployer.create_default_deployer(session, config, ui)

    def create_deletion_deployer(self, session, ui):
        # type: (Session, UI) -> deployer.Deployer
        return deployer.create_deletion_deployer(
            TypedAWSClient(session), ui)

    def create_deployment_reporter(self, ui):
        # type: (UI) -> deployer.DeploymentReporter
        return deployer.DeploymentReporter(ui=ui)

    def create_config_obj(self, chalice_stage_name=DEFAULT_STAGE_NAME,
                          autogen_policy=None,
                          api_gateway_stage=None):
        # type: (str, Optional[bool], str) -> Config
        """Load and validate the project Config for the given stage.

        :raises RuntimeError: if the config file is missing or malformed.
        :raises UnknownConfigFileVersion: for unsupported config versions.
        """
        user_provided_params = {}  # type: Dict[str, Any]
        default_params = {'project_dir': self.project_dir,
                          'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME,
                          'api_gateway_endpoint_type': DEFAULT_ENDPOINT_TYPE,
                          'autogen_policy': True}
        try:
            config_from_disk = self.load_project_config()
        except (OSError, IOError):
            raise RuntimeError("Unable to load the project config file. "
                               "Are you sure this is a chalice project?")
        except ValueError as err:
            raise RuntimeError("Unable to load the project config file: %s"
                               % err)
        self._validate_config_from_disk(config_from_disk)
        # CLI-supplied values override the on-disk config.
        if autogen_policy is not None:
            user_provided_params['autogen_policy'] = autogen_policy
        if self.profile is not None:
            user_provided_params['profile'] = self.profile
        if api_gateway_stage is not None:
            user_provided_params['api_gateway_stage'] = api_gateway_stage
        config = Config(chalice_stage=chalice_stage_name,
                        user_provided_params=user_provided_params,
                        config_from_disk=config_from_disk,
                        default_params=default_params)
        # NOTE(review): added *after* Config is constructed — the same dict
        # object was handed to Config above, so Config presumably resolves
        # its params lazily; confirm before reordering.
        user_provided_params['chalice_app'] = functools.partial(
            self.load_chalice_app, config.environment_variables)
        return config

    def _validate_config_from_disk(self, config):
        # type: (Dict[str, Any]) -> None
        # Accepts versions parseable as a float and <= 2.0; anything
        # else (non-numeric or newer) is rejected.
        string_version = config.get('version', '1.0')
        try:
            version = float(string_version)
            if version > 2.0:
                raise UnknownConfigFileVersion(string_version)
        except ValueError:
            raise UnknownConfigFileVersion(string_version)

    def create_app_packager(self, config, package_format):
        # type: (Config, str) -> AppPackager
        return create_app_packager(config, package_format)

    def create_log_retriever(self, session, lambda_arn):
        # type: (Session, str) -> LogRetriever
        client = TypedAWSClient(session)
        retriever = LogRetriever.create_from_lambda_arn(client, lambda_arn)
        return retriever

    def create_stdin_reader(self):
        # type: () -> PipeReader
        stream = click.get_binary_stream('stdin')
        reader = PipeReader(stream)
        return reader

    def create_lambda_invoke_handler(self, name, stage):
        # type: (str, str) -> LambdaInvokeHandler
        """Build an invoke handler for a deployed lambda function.

        :raises NoSuchFunctionError: if ``name`` is not a deployed resource
            with a lambda_arn for the given stage.
        """
        config = self.create_config_obj(stage)
        deployed = config.deployed_resources(stage)
        try:
            resource = deployed.resource_values(name)
            arn = resource['lambda_arn']
        except (KeyError, ValueError):
            raise NoSuchFunctionError(name)
        function_scoped_config = config.scope(stage, name)
        # The session for max retries needs to be set to 0 for invoking a
        # lambda function because in the case of a timeout or other retriable
        # error the underlying client will call the function again.
        session = self.create_botocore_session(
            read_timeout=function_scoped_config.lambda_timeout,
            max_retries=0,
        )
        client = TypedAWSClient(session)
        invoker = LambdaInvoker(arn, client)
        handler = LambdaInvokeHandler(
            invoker,
            LambdaResponseFormatter(),
            UI(),
        )
        return handler

    def load_chalice_app(self, environment_variables=None,
                         validate_feature_flags=True):
        # type: (Optional[MutableMapping], Optional[bool]) -> Chalice
        """Import the project's app.py and return its Chalice app object.

        :raises RuntimeError: if app.py contains a syntax error.
        """
        # validate_features indicates that we should validate that
        # any expiremental features used have the appropriate feature flags.
        if self.project_dir not in sys.path:
            sys.path.insert(0, self.project_dir)
        # The vendor directory has its contents copied up to the top level of
        # the deployment package. This means that imports will work in the
        # lambda function as if the vendor directory is on the python path.
        # For loading the config locally we must add the vendor directory to
        # the path so it will be treated the same as if it were running on
        # lambda.
        vendor_dir = os.path.join(self.project_dir, 'vendor')
        if os.path.isdir(vendor_dir) and vendor_dir not in sys.path:
            # This is a tradeoff we have to make for local use.
            # The common use case of vendor/ is to include
            # extension modules built for AWS Lambda. If you're
            # running on a non-linux dev machine, then attempting
            # to import these files will raise exceptions. As
            # a workaround, the vendor is added to the end of
            # sys.path so it's after `./lib/site-packages`.
            # This gives you a change to install the correct
            # version locally and still keep the lambda
            # specific one in vendor/
            sys.path.append(vendor_dir)
        if environment_variables is not None:
            self._environ.update(environment_variables)
        try:
            app = importlib.import_module('app')
            chalice_app = getattr(app, 'app')
        except SyntaxError as e:
            message = (
                'Unable to import your app.py file:\n\n'
                'File "%s", line %s\n'
                '  %s\n'
                'SyntaxError: %s'
            ) % (getattr(e, 'filename'), e.lineno, e.text, e.msg)
            raise RuntimeError(message)
        if validate_feature_flags:
            validate.validate_feature_flags(chalice_app)
        return chalice_app

    def load_project_config(self):
        # type: () -> Dict[str, Any]
        """Load the chalice config file from the project directory.

        :raise: OSError/IOError if unable to load the config file.
        """
        config_file = os.path.join(self.project_dir, '.chalice', 'config.json')
        with open(config_file) as f:
            return json.loads(f.read())

    def create_local_server(self, app_obj, config, host, port):
        # type: (Chalice, Config, str, int) -> local.LocalDevServer
        return local.create_local_server(app_obj, config, host, port)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-06 13:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the forum ``Block`` model."""
    # First migration of this app: no prior migration state exists.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Block',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # verbose_name values are Chinese admin-UI labels:
                # name = "board name", desc = "board description",
                # manager_name = "board manager name".
                ('name', models.CharField(max_length=100, verbose_name='板块名称')),
                ('desc', models.CharField(max_length=100, verbose_name='板块描述')),
                ('manager_name', models.CharField(max_length=100, verbose_name='板块管理员名称')),
            ],
        ),
    ]
|
# Register Addresses
# NOTE(review): this register map appears to match the InvenSense
# ITG-3200/ITG-3205 three-axis gyroscope -- confirm against the datasheet.
WHO_AM_I = 0x00
# User-programmable gyro offsets, one high/low byte pair per axis.
X_OFFS_USRH = 0x0C
X_OFFS_USRL = 0x0D
Y_OFFS_USRH = 0x0E
Y_OFFS_USRL = 0x0F
Z_OFFS_USRH = 0x10
Z_OFFS_USRL = 0x11
FIFO_EN = 0x12
AUX_VDDIO = 0x13
AUX_SLV_ADDR = 0x14
SMPLRT_DIV = 0x15
DLPF_FS = 0x16
INT_CFG = 0x17
AUX_BURST_ADDR = 0x18
INT_STATUS = 0x1A
# Sensor output registers: 16-bit values split into high/low byte pairs.
TEMP_OUT_H = 0x1B
TEMP_OUT_L = 0x1C
GYRO_XOUT_H = 0x1D
GYRO_XOUT_L = 0x1E
GYRO_YOUT_H = 0x1F
GYRO_YOUT_L = 0x20
GYRO_ZOUT_H = 0x21
GYRO_ZOUT_L = 0x22
AUX_XOUT_H = 0x23
AUX_XOUT_L = 0x24
AUX_YOUT_H = 0x25
AUX_YOUT_L = 0x26
AUX_ZOUT_H = 0x27
AUX_ZOUT_L = 0x28
FIFO_COUNTH = 0x3A
FIFO_COUNTL = 0x3B
FIFO_R = 0x3C
USER_CTRL = 0x3D
PWR_MGM = 0x3E
# 16 Bit registers
# Aliases naming the high byte of each 16-bit quantity; a 2-byte read
# starting here yields the full value.
X_OFFS = X_OFFS_USRH
Y_OFFS = Y_OFFS_USRH
Z_OFFS = Z_OFFS_USRH
TEMP_OUT = TEMP_OUT_H
GYRO_XOUT = GYRO_XOUT_H
GYRO_YOUT = GYRO_YOUT_H
GYRO_ZOUT = GYRO_ZOUT_H
AUX_XOUT = AUX_XOUT_H
AUX_YOUT = AUX_YOUT_H
AUX_ZOUT = AUX_ZOUT_H
FIFO_COUNT = FIFO_COUNTH
# Masks and Values by register
# WHO_AM_I
# Expected device-ID bits in WHO_AM_I.
ID = 0b01111110
# FIFO_EN -- per-channel enable bits for FIFO capture.
TEMP_OUT_EN = 0b10000000
GYRO_XOUT_EN = 0b01000000
GYRO_YOUT_EN = 0b00100000
GYRO_ZOUT_EN = 0b00010000
AUX_XOUT_EN = 0b00001000
AUX_YOUT_EN = 0b00000100
AUX_ZOUT_EN = 0b00000010
FIFO_FOOTER_EN = 0b00000001
# AUX_VDDIO
AUX_VDDIO_BIT = 0b00000100
# AUX_SLV_ADDR
CLKOUT_EN = 0b10000000
AUX_ID = 0b01111111
# DLPF_FS -- full-scale range selection (degrees/sec) ...
FS_SEL_MASK = 0b00011000
FS_SEL_250 = 0b00000000
FS_SEL_500 = 0b00001000
FS_SEL_1000 = 0b00010000
FS_SEL_2000 = 0b00011000
# ... and digital low-pass filter bandwidth selection (Hz).
DLPF_CFG = 0b00000111
DLPF_CFG_256 = 0b00000000
DLPF_CFG_188 = 0b00000001
DLPF_CFG_98 = 0b00000010
DLPF_CFG_42 = 0b00000011
DLPF_CFG_20 = 0b00000100
DLPF_CFG_10 = 0b00000101
DLPF_CFG_5 = 0b00000110
# INT_CFG -- interrupt pin configuration and enable bits.
ACTL = 0b10000000
OPEN = 0b01000000
LATCH_INT_EN = 0b00100000
INT_ANYRD_2CLEAR = 0b00010000
I2C_MST_ERR_EN = 0b00001000
IMU_RDY_EN = 0b00000100
DMP_DONE_EN = 0b00000010
RAW_RDY_EN = 0b00000001
# INT_STATUS -- interrupt source flags.
FIFO_FULL = 0b10000000
I2C_MST_ERR = 0b00001000
IMU_RDY = 0b00000100
DMP_DONE = 0b00000010
RAW_DATA_RDY = 0b00000001
# USER_CTRL -- feature enable and reset bits.
DMP_EN = 0b10000000
FIFO_EN_BIT = 0b01000000
AUX_IF_EN = 0b00100000
AUX_IF_RST = 0b00001000
DMP_RST = 0b00000100
FIFO_RST = 0b00000010
GYRO_RST = 0b00000001
# PWR_MGM -- power management: reset, sleep, per-axis standby, clock source.
H_RESET = 0b10000000
SLEEP = 0b01000000
STBY_XG = 0b00100000
STBY_YG = 0b00010000
STBY_ZG = 0b00001000
CLK_SEL_INT = 0b00000000 #Internal clock
CLK_SEL_X = 0b00000001
CLK_SEL_Y = 0b00000010
CLK_SEL_Z = 0b00000011
CLK_SEL_EXT1 = 0b00000100 #External clock @32.768kHz reference
CLK_SEL_EXT2 = 0b00000101 #External clock @19.2MHz reference
#CLK_SEL_RESV = 0b00000110 #Reserved. Not used
CLK_SEL_RST = 0b00000111
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""PeaceKeeper benchmark suite.
Peacekeeper measures browser's performance by testing its JavaScript
functionality. JavaScript is a widely used programming language used in the
creation of modern websites to provide features such as animation, navigation,
forms and other common requirements. By measuring a browser's ability to handle
commonly used JavaScript functions Peacekeeper can evaluate its performance.
Peacekeeper scores are measured in operations per second or rendered frames per
second depending on the test. Final Score is computed by calculating geometric
mean of individual tests scores.
"""
from telemetry import benchmark
from telemetry.page import page_measurement
from telemetry.page import page_set
from telemetry.util import statistics
from telemetry.value import merge_values
from telemetry.value import scalar
class _PeaceKeeperMeasurement(page_measurement.PageMeasurement):
  """Scrapes PeaceKeeper scores by hooking the page's console.log.

  PeaceKeeper reports progress via console.log messages.  The injected
  script watches those messages; once the benchmark submits its result
  ("Submit ok.") it copies the test name, score and unit into __results
  and sets _done, which MeasurePage waits on.
  """

  def WillNavigateToPage(self, page, tab):
    # Installed before the page commits so the console.log hook is in place
    # for the whole run.  The script text is executed inside the page and
    # must not be altered.
    page.script_to_evaluate_on_commit = """
        var __results = {};
        var _done = false;
        var __real_log = window.console.log;
        var test_frame = null;
        var benchmark = null;
        window.console.log = function(msg) {
          if (typeof(msg) == "string" && (msg.indexOf("benchmark")) == 0) {
            test_frame = document.getElementById("testFrame");
            benchmark = test_frame.contentWindow.benchmark;
            test_frame.contentWindow.onbeforeunload = {};
            if ((msg.indexOf("Submit ok.")) != -1) {
              _done = true;
              __results["test"] = benchmark.testObjectName;
              __results["score"] = benchmark.test.result;
              if (typeof(benchmark.test.unit) != "undefined") {
                __results["unit"] = benchmark.test.unit;
              } else {
                __results["unit"] = benchmark.test.isFps ? "fps" : "ops";
              }
            }
          }
          __real_log.apply(this, [msg]);
        }
        """

  def MeasurePage(self, _, tab, results):
    # Wait up to 10 minutes for the injected hook to flag completion.
    tab.WaitForJavaScriptExpression('_done', 600)
    result = tab.EvaluateJavaScript('__results')
    # BUG FIX: important=False was previously passed to AddValue() because
    # of a misplaced closing paren; it is a ScalarValue constructor kwarg.
    results.AddValue(scalar.ScalarValue(
        results.current_page, '%s.Score' % result['test'], 'score',
        int(result['score']), important=False))

  def DidRunTest(self, browser, results):
    # Calculate geometric mean as the total for the combined tests.
    combined = merge_values.MergeLikeValuesFromDifferentPages(
        results.all_page_specific_values,
        group_by_name_suffix=True)
    combined_score = [x for x in combined if x.name == 'Score'][0]
    total = statistics.GeometricMean(combined_score.values)
    results.AddSummaryValue(
        scalar.ScalarValue(None, 'Total.Score', 'score', total))
@benchmark.Disabled
class PeaceKeeperBenchmark(benchmark.Benchmark):
  """A base class for PeaceKeeper benchmarks.

  Subclasses must define ``tag`` (the PeaceKeeper suite name) and
  ``test_param`` (the list of individual test names in that suite).
  """
  test = _PeaceKeeperMeasurement
  def CreatePageSet(self, options):
    """Makes a PageSet for PeaceKeeper benchmarks."""
    # Subclasses are expected to define a class member called test_param.
    if not hasattr(self, 'test_param'):
      raise NotImplementedError('test_param not in PeaceKeeper benchmark.')
    ps = page_set.PageSet(
      archive_data_file='../page_sets/data/peacekeeper_%s.json' % self.tag,
      make_javascript_deterministic=False)
    # One page per individual test, run against the futuremark.com runner
    # with debug mode on and repetition off.
    for test_name in self.test_param:
      ps.AddPageWithDefaultRunNavigate(
        ('http://peacekeeper.futuremark.com/run.action?debug=true&'
         'repeat=false&forceSuiteName=%s&forceTestName=%s') %
        (self.tag, test_name))
    return ps
@benchmark.Disabled
class PeaceKeeperRender(PeaceKeeperBenchmark):
  """PeaceKeeper rendering benchmark suite.

  These tests measure your browser's ability to render and modify specific
  elements used in typical web pages. Rendering tests manipulate the DOM tree
  in real-time. The tests measure display updating speed (frames per second).
  """
  tag = 'render'
  # Individual test names within the 'render' suite.
  test_param = ['renderGrid01',
                'renderGrid02',
                'renderGrid03',
                'renderPhysics'
               ]
@benchmark.Disabled
class PeaceKeeperData(PeaceKeeperBenchmark):
  """PeaceKeeper Data operations benchmark suite.

  These tests measure your browser's ability to add, remove and modify data
  stored in an array. The Data suite consists of two tests:
  1. arrayCombined: This test uses all features of the JavaScript Array
     object. This is a technical test that is not based on profiled data.
     The source data are different sized arrays of numbers.
  2. arrayWeighted: This test is similar to 'arrayCombined', but the load is
     balanced based on profiled data. The source data is a list of all the
     countries in the world.
  """
  tag = 'array'
  # Individual test names within the 'array' suite.
  test_param = ['arrayCombined01',
                'arrayWeighted'
               ]
@benchmark.Disabled
class PeaceKeeperDom(PeaceKeeperBenchmark):
  """PeaceKeeper DOM operations benchmark suite.

  These tests emulate the methods used to create typical dynamic webpages.
  The DOM tests are based on development experience and the capabilities of
  the jQuery framework.
  1. domGetElements: This test uses native DOM methods getElementById and
     getElementsByName. The elements are not modified.
  2. domDynamicCreationCreateElement: A common use of DOM is to dynamically
     create content with JavaScript, this test measures creating objects
     individually and then appending them to DOM.
  3. domDynamicCreationInnerHTML: This test is similar to the previous one,
     but uses the innerHTML-method.
  4. domJQueryAttributeFilters: This test does a DOM query with jQuery.
     It searches elements with specific attributes.
  5. domJQueryBasicFilters: This test uses filters to query elements from DOM.
  6. domJQueryBasics: This test queries elements from DOM with basic methods.
     It is similar to domGetElements, but uses jQuery rather than native
     methods.
  7. domJQueryContentFilters: Query elements based on content. This does
     string searching and these methods are assumed to be time consuming.
  8. domJQueryHierarchy: Query elements based on hierarchy, such as getting
     sibling, parent or child nodes from a DOM tree.
  9. domQueryselector: QuerySelector, which allows JavaScript to search
     elements from the DOM tree directly without the need to iterate the
     whole tree through domGetElements.
  """
  tag = 'dom'
  # Individual test names within the 'dom' suite.
  test_param = ['domGetElements',
                'domDynamicCreationCreateElement',
                'domDynamicCreationInnerHTML',
                'domJQueryAttributeFilters',
                'domJQueryBasicFilters',
                'domJQueryBasics',
                'domJQueryContentFilters',
                'domJQueryHierarchy',
                'domQueryselector'
               ]
@benchmark.Disabled
class PeaceKeeperTextParsing(PeaceKeeperBenchmark):
  """PeaceKeeper Text Parsing benchmark suite.

  These tests measure your browser's performance in typical text
  manipulations such as using a profanity filter for chats, browser detection
  and form validation.
  1. stringChat: This test removes swearing from artificial chat messages.
     Test measures looping and string replace-method.
  2. stringDetectBrowser: This test uses string indexOf-method to detect
     browser and operating system.
  3. stringFilter: This test filters a list of movies with a given keyword.
     The behaviour is known as filtering select or continuous filter. It's
     used to give real time suggestions while a user is filling input fields.
     The test uses simple regular expressions.
  4. stringValidateForm: This test uses complex regular expressions to
     validate user input.
  5. stringWeighted: This is an artificial test. Methods used and their
     intensities are chosen based on profiled data.
  """
  tag = 'string'
  # Individual test names within the 'string' suite.
  test_param = ['stringChat',
                'stringDetectBrowser',
                'stringFilter',
                'stringWeighted',
                'stringValidateForm'
               ]
@benchmark.Disabled
class PeaceKeeperHTML5Canvas(PeaceKeeperBenchmark):
  """PeaceKeeper HTML5 Canvas benchmark suite.

  These tests use HTML5 Canvas, which is a web technology for drawing and
  manipulating graphics without external plug-ins.
  1. experimentalRipple01: Simulates a 'water ripple' effect by using HTML 5
     Canvas. It measures the browser's ability to draw individual pixels.
  2. experimentalRipple02: Same test as 'experimentalRipple01', but with a
     larger canvas and thus a heavier workload.
  """
  tag = 'experimental'
  # Individual test names within the 'experimental' suite.
  test_param = ['experimentalRipple01',
                'experimentalRipple02'
               ]
@benchmark.Disabled
class PeaceKeeperHTML5Capabilities(PeaceKeeperBenchmark):
  """PeaceKeeper HTML5 Capabilities benchmark suite.

  These tests check browser HTML5 capabilities support for WebGL, video
  formats, simple 2D sprite based games and web workers.
  This benchmark only tests HTML5 capability and thus is not calculated into
  the overall score.
  1. HTML5 - WebGL: WebGL allows full blown 3D graphics to be rendered in a
     browser without the need for any external plug-ins.
     a) webglSphere
  2. HTML5 - Video: These tests find out which HTML5 video formats are
     supported by your browser. Peacekeeper only checks if your browser is
     able to play a specific format, no other valuation is done.
     a) videoCodecH264
     b) videoCodecTheora
     c) videoCodecWebM
     d) videoPosterSupport
  3. HTML5 - Web Worker: These tests use HTML5 Web Worker, which allows
     JavaScript to multithread - i.e. the ability to perform multiple actions
     concurrently.
     a) workerContrast01
     b) workerContrast02
  4. HTML5 - Game: This test simulates a simple 2D, sprite-based game.
     The test itself is the real game, and what is shown is a recorded play.
     a) gamingSpitfire
  """
  tag = 'html5'
  # Individual test names within the 'html5' suite.
  test_param = ['webglSphere',
                'gamingSpitfire',
                'videoCodecH264',
                'videoCodecTheora',
                'videoCodecWebM',
                'videoPosterSupport',
                'workerContrast01',
                'workerContrast02'
               ]
|
# Copyright 2019 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from graspy.simulations import sample_edges
def check_dirloop(directed, loops):
    """Validate that ``directed`` and ``loops`` are both plain bools."""
    for flag_name, flag_value in (("directed", directed), ("loops", loops)):
        if type(flag_value) is not bool:
            raise TypeError("%s is not of type bool." % flag_name)
def check_r(r):
    """Validate that ``r`` is a float-like scalar within [-1, 1]."""
    if not np.issubdtype(type(r), np.floating):
        raise TypeError("r is not of type float.")
    if r < -1 or r > 1:
        raise ValueError("r must between -1 and 1.")
def check_rel_er(p, r):
    """Validate that the derived ER edge probabilities are nonnegative."""
    # Probability of an edge in G2 given the matching edge exists in G1.
    present_prob = p + r * (1 - p)
    # Probability of an edge in G2 given the matching edge is absent in G1.
    absent_prob = p * (1 - r)
    if present_prob < 0:
        raise ValueError("p + r * (1 - p) should be bigger than 0")
    if absent_prob < 0:
        raise ValueError("p * (1 - r) should be bigger than 0")
def check_rel_sbm(p, r):
    """Validate the derived SBM edge probabilities for every block pair."""
    shape = np.array(p).shape
    for row in range(shape[0]):
        for col in range(shape[1]):
            prob = p[row][col]
            # Same two feasibility conditions as the ER case, applied
            # elementwise to the block probability matrix.
            if prob + r * (1 - prob) < 0:
                raise ValueError("p + r * (1 - p) should be bigger than 0")
            if prob * (1 - r) < 0:
                raise ValueError("p * (1 - r) should be bigger than 0")
def sample_edges_corr(P, R, directed=False, loops=False):
    """
    Generate a pair of correlated graphs with Bernoulli distribution.
    Both G1 and G2 are binary matrices.

    Parameters
    ----------
    P: np.ndarray, shape (n_vertices, n_vertices)
        Matrix of probabilities (between 0 and 1) for a random graph.
    R: np.ndarray, shape (n_vertices, n_vertices)
        Matrix of correlation (between 0 and 1) between graph pairs.
    directed: boolean, optional (default=False)
        If False, output adjacency matrix will be symmetric. Otherwise, output
        adjacency matrix will be asymmetric.
    loops: boolean, optional (default=False)
        If False, no edges will be sampled in the diagonal. Otherwise, edges
        are sampled in the diagonal.

    References
    ----------
    .. [1] Vince Lyzinski, et al. "Seeded Graph Matching for Correlated
       Erdos-Renyi Graphs", Journal of Machine Learning Research 15, 2014

    Returns
    -------
    G1: ndarray (n_vertices, n_vertices)
        Adjacency matrix the same size as P representing a random graph.
    G2: ndarray (n_vertices, n_vertices)
        Adjacency matrix the same size as P representing a random graph.

    Examples
    --------
    >>> np.random.seed(1)
    >>> p = 0.5
    >>> r = 0.3
    >>> R = r * np.ones((5, 5))
    >>> P = p * np.ones((5, 5))

    To sample a correlated graph pair based on P and R matrices:

    >>> sample_edges_corr(P, R, directed = False, loops = False)
    (array([[0., 1., 0., 0., 0.],
            [1., 0., 0., 0., 0.],
            [0., 0., 0., 0., 1.],
            [0., 0., 0., 0., 1.],
            [0., 0., 1., 1., 0.]]), array([[0., 1., 0., 0., 0.],
            [1., 0., 1., 0., 1.],
            [0., 1., 0., 1., 1.],
            [0., 0., 1., 0., 1.],
            [0., 1., 1., 1., 0.]]))
    """
    # test input
    # check P
    if type(P) is not np.ndarray:
        raise TypeError("P must be numpy.ndarray")
    if len(P.shape) != 2:
        raise ValueError("P must have dimension 2 (n_vertices, n_vertices)")
    if P.shape[0] != P.shape[1]:
        raise ValueError("P must be a square matrix")
    # check R
    if type(R) is not np.ndarray:
        raise TypeError("R must be numpy.ndarray")
    if len(R.shape) != 2:
        raise ValueError("R must have dimension 2 (n_vertices, n_vertices)")
    # BUG FIX: previously compared R.shape[0] against P.shape[1], which does
    # not test that R itself is square (and lets a non-square R through when
    # its row count happens to match P's column count).
    if R.shape[0] != R.shape[1]:
        raise ValueError("R must be a square matrix")
    # check directed and loops
    check_dirloop(directed, loops)
    G1 = sample_edges(P, directed=directed, loops=loops)
    # Conditional edge probabilities for G2 given G1: where G1 has an edge
    # use P + R*(1-P), elsewhere P*(1-R) -- this yields correlation R.
    P2 = G1.copy()
    P2 = np.where(P2 == 1, P + R * (1 - P), P * (1 - R))
    G2 = sample_edges(P2, directed=directed, loops=loops)
    return G1, G2
def er_corr(n, p, r, directed=False, loops=False):
    """
    Generate a pair of correlated graphs with specified edge probability.
    Both G1 and G2 are binary matrices.

    Parameters
    ----------
    n: int
        Number of vertices
    p: float
        Probability of an edge existing between two vertices, between 0 and 1.
    r: float
        The value of the correlation between the same vertices in two graphs.
    directed: boolean, optional (default=False)
        If False, output adjacency matrix will be symmetric. Otherwise, output
        adjacency matrix will be asymmetric.
    loops: boolean, optional (default=False)
        If False, no edges will be sampled in the diagonal. Otherwise, edges
        are sampled in the diagonal.

    Returns
    -------
    G1: ndarray (n_vertices, n_vertices)
        Adjacency matrix the same size as P representing a random graph.
    G2: ndarray (n_vertices, n_vertices)
        Adjacency matrix the same size as P representing a random graph.

    Examples
    --------
    >>> np.random.seed(2)
    >>> p = 0.5
    >>> r = 0.3
    >>> n = 5

    To sample a correlated ER graph pair based on n, p and r:

    >>> er_corr(n, p, r, directed=False, loops=False)
    (array([[0., 0., 1., 0., 0.],
            [0., 0., 0., 1., 0.],
            [1., 0., 0., 1., 1.],
            [0., 1., 1., 0., 1.],
            [0., 0., 1., 1., 0.]]), array([[0., 1., 1., 1., 0.],
            [1., 0., 0., 1., 0.],
            [1., 0., 0., 1., 1.],
            [1., 1., 1., 0., 1.],
            [0., 0., 1., 1., 0.]]))
    """
    # test input
    # check n
    if not np.issubdtype(type(n), np.integer):
        raise TypeError("n is not of type int.")
    elif n <= 0:
        msg = "n must be > 0."
        raise ValueError(msg)
    # check p
    if not np.issubdtype(type(p), np.floating):
        # BUG FIX: this message previously read "r is not of type float.",
        # copy-pasted from the r check, which misreported the bad argument.
        raise TypeError("p is not of type float.")
    elif p < 0 or p > 1:
        msg = "p must between 0 and 1."
        raise ValueError(msg)
    # check r
    check_r(r)
    # check the relation between r and p
    check_rel_er(p, r)
    # check directed and loops
    check_dirloop(directed, loops)
    # Expand the scalar parameters to full matrices and delegate.
    P = p * np.ones((n, n))
    R = r * np.ones((n, n))
    G1, G2 = sample_edges_corr(P, R, directed=directed, loops=loops)
    return G1, G2
def sbm_corr(n, p, r, directed=False, loops=False):
    """
    Generate a pair of correlated SBM graphs with specified edge probability.
    Both G1 and G2 are binary matrices.

    Parameters
    ----------
    n: list of int, shape (n_communities)
        Number of vertices in each community. Communities are assigned n[0],
        n[1], ...
    p: array-like, shape (n_communities, n_communities)
        Probability of an edge between each of the communities, where p[i, j]
        indicates the probability of a connection between edges in communities
        [i, j]. 0 < p[i, j] < 1 for all i, j.
    r: float
        Probability of the correlation between the same vertices in two
        graphs.
    directed: boolean, optional (default=False)
        If False, output adjacency matrix will be symmetric. Otherwise, output
        adjacency matrix will be asymmetric.
    loops: boolean, optional (default=False)
        If False, no edges will be sampled in the diagonal. Otherwise, edges
        are sampled in the diagonal.

    Returns
    -------
    G1: ndarray (n_vertices, n_vertices)
        Adjacency matrix the same size as P representing a random graph.
    G2: ndarray (n_vertices, n_vertices)
        Adjacency matrix the same size as P representing a random graph.

    Examples
    --------
    >>> np.random.seed(3)
    >>> n = [3, 3]
    >>> p = [[0.5, 0.1], [0.1, 0.5]]
    >>> r = 0.3

    To sample a correlated SBM graph pair based on n, p and r:

    >>> sbm_corr(n, p, r, directed=False, loops=False)
    (array([[0., 1., 0., 0., 0., 0.],
            [1., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 0., 0., 1.],
            [0., 0., 0., 0., 0., 0.],
            [0., 0., 0., 1., 0., 0.]]), array([[0., 1., 0., 0., 0., 0.],
            [1., 0., 0., 1., 1., 0.],
            [0., 0., 0., 0., 0., 0.],
            [0., 1., 0., 0., 0., 1.],
            [0., 1., 0., 0., 0., 0.],
            [0., 0., 0., 1., 0., 0.]]))
    """
    # test input
    # Check n
    if not isinstance(n, (list, np.ndarray)):
        msg = "n must be a list or np.array, not {}.".format(type(n))
        raise TypeError(msg)
    else:
        n = np.array(n)
        if not np.issubdtype(n.dtype, np.integer):
            msg = "There are non-integer elements in n"
            raise ValueError(msg)
    # Check p
    if not isinstance(p, (list, np.ndarray)):
        msg = "p must be a list or np.array, not {}.".format(type(p))
        raise TypeError(msg)
    else:
        p = np.array(p)
        if not np.issubdtype(p.dtype, np.number):
            msg = "There are non-numeric elements in p"
            raise ValueError(msg)
        elif p.shape != (n.size, n.size):
            # BUG FIX: message previously read "p is must have shape ..."
            msg = "p must have shape len(n) x len(n), not {}".format(p.shape)
            raise ValueError(msg)
        elif np.any(p < 0) or np.any(p > 1):
            msg = "Values in p must be in between 0 and 1."
            raise ValueError(msg)
    # check r
    check_r(r)
    # check the relation between r and p
    check_rel_sbm(p, r)
    # check directed and loops
    check_dirloop(directed, loops)
    # Expand the block probability matrix p into a full vertex-by-vertex
    # probability matrix P, one constant block per community pair.
    P = np.zeros((np.sum(n), np.sum(n)))
    block_indices = np.insert(np.cumsum(np.array(n)), 0, 0)
    for i in range(np.array(p).shape[0]):  # for each row
        for j in range(np.array(p).shape[1]):  # for each column
            P[
                block_indices[i] : block_indices[i + 1],
                block_indices[j] : block_indices[j + 1],
            ] = p[i][j]
    R = r * np.ones((np.sum(n), np.sum(n)))
    G1, G2 = sample_edges_corr(P, R, directed=directed, loops=loops)
    return G1, G2
|
#
# Copyright (c) 2009-2015, Mendix bv
# All Rights Reserved.
#
# http://www.mendix.com/
#
import os
import shutil
import subprocess
import socket
import http.client
from .log import logger
try:
import readline
# allow - in filenames we're completing without messing up completion
readline.set_completer_delims(
readline.get_completer_delims().replace("-", "")
)
except ImportError:
pass
try:
import httplib2
except ImportError:
logger.critical(
"Failed to import httplib2. This module is needed by "
"m2ee. Please povide it on the python library path"
)
raise
def unpack(config, mda_name):
    """Extract an uploaded MDA archive into the app base directory.

    Verifies the archive with ``unzip -t`` first, then wipes the existing
    model/ and web/ trees and extracts only those two subtrees from the
    archive.  Returns True on success, False on any failure.
    """
    mda_file_name = os.path.join(config.get_model_upload_path(), mda_name)
    if not os.path.isfile(mda_file_name):
        logger.error("file %s does not exist" % mda_file_name)
        return False
    logger.debug("Testing archive...")
    # Integrity test only (-t); -qq keeps unzip quiet on success.
    cmd = ("unzip", "-tqq", mda_file_name)
    logger.trace("Executing %s" % str(cmd))
    try:
        proc = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        (stdout, stderr) = proc.communicate()
        if proc.returncode != 0:
            logger.error(
                "An error occured while testing archive " "consistency:"
            )
            logger.error("stdout: %s" % stdout)
            logger.error("stderr: %s" % stderr)
            return False
        else:
            logger.trace("stdout: %s" % stdout)
            logger.trace("stderr: %s" % stderr)
    except OSError as ose:
        import errno
        # ENOENT here means the unzip binary itself is missing, not the mda.
        if ose.errno == errno.ENOENT:
            logger.error(
                "The unzip program could not be found: %s" % ose.strerror
            )
        else:
            logger.error("An error occured while executing unzip: %s" % ose)
        return False
    logger.debug("Removing everything in model/ and web/ locations...")
    # TODO: error handling. removing model/ and web/ itself should not be
    # possible (parent dir is root owned), all errors ignored for now
    app_base = config.get_app_base()
    shutil.rmtree(os.path.join(app_base, "model"), ignore_errors=True)
    shutil.rmtree(os.path.join(app_base, "web"), ignore_errors=True)
    logger.debug("Extracting archive...")
    # Only the web/* and model/* subtrees are extracted; -o overwrites.
    cmd = ("unzip", "-oq", mda_file_name, "web/*", "model/*", "-d", app_base)
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    (stdout, stderr) = proc.communicate()
    if proc.returncode != 0:
        logger.error("An error occured while extracting archive:")
        logger.error("stdout: %s" % stdout)
        logger.error("stderr: %s" % stderr)
        return False
    else:
        logger.trace("stdout: %s" % stdout)
        logger.trace("stderr: %s" % stderr)
    # XXX: reset permissions on web/ model/ to be sure after executing this
    # function
    return True
def fix_mxclientsystem_symlink(config):
    """Point the public webroot mxclientsystem symlink at the real path.

    Repoints an existing symlink if it is stale, creates it if absent, and
    refuses to touch a regular file occupying that name.
    """
    logger.debug("Running fix_mxclientsystem_symlink...")
    symlink_path = os.path.join(
        config.get_public_webroot_path(), "mxclientsystem"
    )
    logger.trace("mxclient_symlink: %s" % symlink_path)
    target_path = config.get_real_mxclientsystem_path()
    logger.trace("real_mxclientsystem_path: %s" % target_path)
    if os.path.islink(symlink_path):
        # Symlink already present: repoint it only if it is stale.
        current_target = os.path.realpath(symlink_path)
        if current_target != target_path:
            logger.debug(
                "mxclientsystem symlink exists, but points "
                "to %s" % current_target
            )
            logger.debug(
                "redirecting symlink to %s" % target_path
            )
            os.unlink(symlink_path)
            os.symlink(target_path, symlink_path)
        return
    if not os.path.exists(symlink_path):
        # Nothing there yet: create the symlink from scratch.
        logger.debug(
            "creating mxclientsystem symlink pointing to %s"
            % target_path
        )
        try:
            os.symlink(target_path, symlink_path)
        except OSError as e:
            logger.error("creating symlink failed: %s" % e)
        return
    # A regular file/directory occupies the name: leave it alone.
    logger.warn(
        "Not touching mxclientsystem symlink: file exists "
        "and is not a symlink"
    )
def run_post_unpack_hook(post_unpack_hook):
    """Run the configured post-unpack hook script, if present and executable."""
    # Guard clauses: missing or non-executable hook scripts are logged only.
    if not os.path.isfile(post_unpack_hook):
        logger.error(
            "post-unpack-hook script %s does not exist." % post_unpack_hook
        )
        return
    if not os.access(post_unpack_hook, os.X_OK):
        logger.error(
            "post-unpack-hook script %s is not "
            "executable." % post_unpack_hook
        )
        return
    logger.info("Running post-unpack-hook: %s" % post_unpack_hook)
    retcode = subprocess.call((post_unpack_hook,))
    if retcode != 0:
        logger.error(
            "The post-unpack-hook returned a "
            "non-zero exit code: %d" % retcode
        )
def check_download_runtime_existence(url):
    """HEAD the url and return True only when it answers HTTP 200."""
    http_conn = httplib2.Http(timeout=10)
    logger.debug("Checking for existence of %s via HTTP HEAD" % url)
    try:
        (response_headers, response_body) = http_conn.request(url, "HEAD")
        logger.trace("Response headers: %s" % response_headers)
    except (
        httplib2.HttpLib2Error,
        http.client.HTTPException,
        socket.error,
    ) as e:
        logger.error(
            "Checking download url %s failed: %s: %s"
            % (url, e.__class__.__name__, e)
        )
        return False
    status = response_headers["status"]
    if status == "200":
        logger.debug("Ok, got HTTP 200")
        return True
    if status == "404":
        logger.error("The location %s cannot be found." % url)
        return False
    # Any other status code is treated as a failure as well.
    logger.error(
        "Checking download url %s failed, HTTP status code %s"
        % (url, status)
    )
    return False
def download_and_unpack_runtime(url, path):
    """Stream a runtime distribution from ``url`` and untar it into ``path``.

    Pipes ``wget -O -`` into ``tar xz -C path``.  Returns True on success,
    False on extraction failure, and None when the url does not exist
    (preserving the original early-exit behaviour).
    """
    if not check_download_runtime_existence(url):
        return
    logger.info("Going to download and extract %s to %s" % (url, path))
    wget_proc = subprocess.Popen(
        [
            "wget",
            "-O",
            "-",
            url,
        ],
        stdout=subprocess.PIPE,
    )
    tar_proc = subprocess.Popen(
        [
            "tar",
            "xz",
            "-C",
            path,
        ],
        stdin=wget_proc.stdout,
        stdout=subprocess.PIPE,
        # BUG FIX: stderr was not captured before, so the error log below
        # always printed None instead of tar's actual error output.
        stderr=subprocess.PIPE,
    )
    # Close our copy of the pipe so wget gets SIGPIPE if tar exits early.
    wget_proc.stdout.close()
    stdout, stderr = tar_proc.communicate()
    if tar_proc.returncode == 0:
        logger.info("Successfully downloaded runtime!")
        return True
    else:
        logger.error("Could not download and unpack runtime:")
        logger.error(stderr)
        return False
|
from random import randint
import time

# Simple guessing game (pt-BR): the computer picks a number from 0 to 5 and
# the player gets a single guess.  Messages use ANSI color escape codes.
secret = randint(0, 5)
divider = '~^~' * 20
print(divider)
print('\033[1;44m Estou pensando em um número de 0 a 5. Tente descobrir qual é... \033[m')
print(divider)
guess = int(input('Em que número estou pensando? '))
print('\033[1;40mPROCESSANDO...')
# Small pause for dramatic effect before revealing the answer.
time.sleep(2)
if guess == secret:
    print('\033[m \033[7;1;44m Parabéns, você acertou !! \033[mO número pensado foi {}'.format(secret))
else:
    print('\033[1;31;40m Você errou!! \033[m O número pensado foi {}'.format(secret))
|
# Demonstrates escaping backslashes: each "\\" in the literal prints as one.
message = "I am here for the double backslash\\\\"
print(message)
|
# TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/remote:configure.bzl", "remote_execution_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party/toolchains/embedded/arm-linux:arm_linux_toolchain_configure.bzl", "arm_linux_toolchain_configure")
load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load(
"//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
"def_file_filter_configure",
)
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/clog:workspace.bzl", clog = "repo")
load("//third_party/cpuinfo:workspace.bzl", cpuinfo = "repo")
load("//third_party/dlpack:workspace.bzl", dlpack = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/hexagon:workspace.bzl", hexagon_nn = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/opencl_headers:workspace.bzl", opencl_headers = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
load("//third_party/psimd:workspace.bzl", psimd = "repo")
load("//third_party/ruy:workspace.bzl", ruy = "repo")
load("//third_party/sobol_data:workspace.bzl", sobol_data = "repo")
load("//third_party/vulkan_headers:workspace.bzl", vulkan_headers = "repo")
load("//third_party/toolchains/remote_config:configs.bzl", "initialize_rbe_configs")
def initialize_third_party():
    """ Load third party repositories. See above load() statements. """

    # Each call below instantiates the repository rule bound by the
    # corresponding load(..., <name> = "repo") at the top of this file.
    FP16()
    aws()
    clog()
    cpuinfo()
    dlpack()
    flatbuffers()
    hexagon_nn()
    highwayhash()
    hwloc()
    icu()
    kissfft()
    jpeg()
    nasm()
    opencl_headers()
    pasta()
    psimd()
    sobol_data()
    vulkan_headers()
    ruy()
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
    """Returns the canonical, repo-anchored label string for dep."""
    label = Label(dep)
    return str(label)
# If TensorFlow is linked as a submodule.
# path_prefix and tf_repo_name are no longer used.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
    """Legacy entry point: defines all repos and bindings for a TF build.

    Both arguments are retained only for backwards compatibility and are
    ignored beyond a deprecation warning inside tf_repositories().
    """
    tf_repositories(path_prefix, tf_repo_name)
    tf_bind()
# Toolchains & platforms required by Tensorflow to build.
def tf_toolchains():
    """Registers the local execution platform and Python toolchain."""
    native.register_execution_platforms("@local_execution_config_platform//:platform")
    native.register_toolchains("@local_execution_config_python//:py_toolchain")
# Define all external repositories required by TensorFlow
def tf_repositories(path_prefix = "", tf_repo_name = ""):
"""All external dependencies for TF builds."""
# Initialize toolchains and platforms.
tf_toolchains()
# Loads all external repos to configure RBE builds.
initialize_rbe_configs()
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
remote_execution_configure(name = "local_config_remote_execution")
initialize_third_party()
# For windows bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"),
remote_config_repo_arm = "../arm_compiler",
remote_config_repo_aarch64 = "../aarch64_compiler",
)
    # TFLite crossbuild toolchain for embedded Linux
arm_linux_toolchain_configure(
name = "local_config_embedded_arm",
build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:BUILD"),
aarch64_repo = "../aarch64_linux_toolchain",
armhf_repo = "../armhf_linux_toolchain",
)
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used " +
"and will be removed in the future.")
    # To update any of the dependencies below:
# a) update URL and strip_prefix to the new git commit hash
# b) get the sha256 hash of the commit by running:
# curl -L <url> | sha256sum
# and update the sha256 with the result.
tf_http_archive(
name = "XNNPACK",
sha256 = "95446b7b25ef2879f42ca7ffebf7894904b8ba7ec192040dcc3f2a634078f08c",
strip_prefix = "XNNPACK-c12dcda4bb85854a2926a04c6ac0dff175fd7163",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/c12dcda4bb85854a2926a04c6ac0dff175fd7163.zip",
"https://github.com/google/XNNPACK/archive/c12dcda4bb85854a2926a04c6ac0dff175fd7163.zip",
],
)
tf_http_archive(
name = "FXdiv",
sha256 = "ab7dfb08829bee33dca38405d647868fb214ac685e379ec7ef2bebcd234cd44d",
strip_prefix = "FXdiv-b408327ac2a15ec3e43352421954f5b1967701d1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/FXdiv/archive/b408327ac2a15ec3e43352421954f5b1967701d1.zip",
"https://github.com/Maratyszcza/FXdiv/archive/b408327ac2a15ec3e43352421954f5b1967701d1.zip",
],
)
tf_http_archive(
name = "pthreadpool",
sha256 = "8461f6540ae9f777ce20d1c0d1d249e5e61c438744fb390c0c6f91940aa69ea3",
strip_prefix = "pthreadpool-545ebe9f225aec6dca49109516fac02e973a3de2",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/pthreadpool/archive/545ebe9f225aec6dca49109516fac02e973a3de2.zip",
"https://github.com/Maratyszcza/pthreadpool/archive/545ebe9f225aec6dca49109516fac02e973a3de2.zip",
],
)
tf_http_archive(
name = "mkl_dnn",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "a0211aeb5e7dad50b97fa5dffc1a2fe2fe732572d4164e1ee8750a2ede43fbec",
strip_prefix = "oneDNN-0.21.3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz",
],
)
tf_http_archive(
name = "mkl_dnn_v1",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn_v1.BUILD"),
sha256 = "5369f7b2f0b52b40890da50c0632c3a5d1082d98325d0f2bff125d19d0dcaa1d",
strip_prefix = "oneDNN-1.6.4",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v1.6.4.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v1.6.4.tar.gz",
],
)
tf_http_archive(
name = "com_google_absl",
build_file = clean_dep("//third_party:com_google_absl.BUILD"),
# TODO: Remove the patch when https://github.com/abseil/abseil-cpp/issues/326 is resolved
        # and when TensorFlow is built against CUDA 10.2
patch_file = clean_dep("//third_party:com_google_absl_fix_mac_and_nvcc_build.patch"),
sha256 = "f368a8476f4e2e0eccf8a7318b98dafbe30b2600f4e3cf52636e5eb145aba06a", # SHARED_ABSL_SHA
strip_prefix = "abseil-cpp-df3ea785d8c30a9503321a3d35ee7d35808f190d",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
],
)
tf_http_archive(
name = "eigen_archive",
build_file = clean_dep("//third_party:eigen.BUILD"),
patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"),
sha256 = "306f15c04fbd514b4adc3a327a2c6f63521ea6805cab75691fa30c30fea55193", # SHARED_EIGEN_SHA
strip_prefix = "eigen-fd1dcb6b45a2c797ad4c4d6cc7678ee70763b4ed",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/gitlab.com/libeigen/eigen/-/archive/fd1dcb6b45a2c797ad4c4d6cc7678ee70763b4ed/eigen-fd1dcb6b45a2c797ad4c4d6cc7678ee70763b4ed.tar.gz",
"https://gitlab.com/libeigen/eigen/-/archive/fd1dcb6b45a2c797ad4c4d6cc7678ee70763b4ed/eigen-fd1dcb6b45a2c797ad4c4d6cc7678ee70763b4ed.tar.gz",
],
)
tf_http_archive(
name = "arm_compiler",
build_file = clean_dep("//:arm_compiler.BUILD"),
sha256 = "b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f",
strip_prefix = "rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
"https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
],
)
tf_http_archive(
# This is the latest `aarch64-none-linux-gnu` compiler provided by ARM
# See https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads
# The archive contains GCC version 9.2.1
name = "aarch64_compiler",
build_file = "//:arm_compiler.BUILD",
sha256 = "8dfe681531f0bd04fb9c53cf3c0a3368c616aa85d48938eebe2b516376e06a66",
strip_prefix = "gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz",
],
)
tf_http_archive(
name = "aarch64_linux_toolchain",
build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:aarch64-linux-toolchain.BUILD"),
sha256 = "8ce3e7688a47d8cd2d8e8323f147104ae1c8139520eca50ccf8a7fa933002731",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz",
],
)
tf_http_archive(
name = "armhf_linux_toolchain",
build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:armhf-linux-toolchain.BUILD"),
sha256 = "d4f6480ecaa99e977e3833cc8a8e1263f9eecd1ce2d022bb548a24c4f32670f5",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz",
],
)
tf_http_archive(
name = "libxsmm_archive",
build_file = clean_dep("//third_party:libxsmm.BUILD"),
sha256 = "9c0af4509ea341d1ee2c6c19fc6f19289318c3bd4b17844efeb9e7f9691abf76",
strip_prefix = "libxsmm-1.14",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.14.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.14.tar.gz",
],
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9",
strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814",
system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
"https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
],
)
tf_http_archive(
name = "com_github_google_crc32c",
sha256 = "6b3b1d861bb8307658b2407bc7a4c59e566855ef5368a60b35c893551e4788e9",
build_file = "@com_github_googlecloudplatform_google_cloud_cpp//bazel:crc32c.BUILD",
strip_prefix = "crc32c-1.0.6",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/crc32c/archive/1.0.6.tar.gz",
"https://github.com/google/crc32c/archive/1.0.6.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "ff82045b9491f0d880fc8e5c83fd9542eafb156dcac9ff8c6209ced66ed2a7f0",
strip_prefix = "google-cloud-cpp-1.17.1",
repo_mapping = {
"@com_github_curl_curl": "@curl",
"@com_github_nlohmann_json": "@nlohmann_json_lib",
},
system_build_file = clean_dep("//third_party/systemlibs:google_cloud_cpp.BUILD"),
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz",
"https://github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_tensorflow_gcp_tools",
sha256 = "5e9ebe17eaa2895eb7f77fefbf52deeda7c4b63f5a616916b823eb74f3a0c542",
strip_prefix = "tensorflow-gcp-tools-2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz",
"https://github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz",
],
)
tf_http_archive(
name = "com_google_googleapis",
build_file = clean_dep("//third_party/googleapis:googleapis.BUILD"),
sha256 = "7ebab01b06c555f4b6514453dc3e1667f810ef91d1d4d2d3aa29bb9fcb40a900",
strip_prefix = "googleapis-541b1ded4abadcc38e8178680b0677f65594ea6f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
"https://github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
],
)
tf_http_archive(
name = "gemmlowp",
sha256 = "43146e6f56cb5218a8caaab6b5d1601a083f1f31c06ff474a4378a7d35be9cfb", # SHARED_GEMMLOWP_SHA
strip_prefix = "gemmlowp-fda83bdc38b118cc6b56753bd540caa49e570745",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip",
"https://github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip",
],
)
tf_http_archive(
name = "farmhash_archive",
build_file = clean_dep("//third_party:farmhash.BUILD"),
sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0", # SHARED_FARMHASH_SHA
strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
"https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
],
)
tf_http_archive(
name = "png",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307",
strip_prefix = "libpng-1.6.37",
system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
"https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
],
)
tf_http_archive(
name = "org_sqlite",
build_file = clean_dep("//third_party:sqlite.BUILD"),
sha256 = "b34f4c0c0eefad9a7e515c030c18702e477f4ef7d8ade6142bdab8011b487ac6",
strip_prefix = "sqlite-amalgamation-3330000",
system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3330000.zip",
"https://www.sqlite.org/2020/sqlite-amalgamation-3330000.zip",
],
)
tf_http_archive(
name = "gif",
build_file = clean_dep("//third_party:gif.BUILD"),
patch_file = clean_dep("//third_party:gif_fix_strtok_r.patch"),
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
"https://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
],
)
tf_http_archive(
name = "six_archive",
build_file = clean_dep("//third_party:six.BUILD"),
sha256 = "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
strip_prefix = "six-1.15.0",
system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz",
"https://pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz",
],
)
tf_http_archive(
name = "astor_archive",
build_file = clean_dep("//third_party:astor.BUILD"),
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
"https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
],
)
tf_http_archive(
name = "astunparse_archive",
build_file = clean_dep("//third_party:astunparse.BUILD"),
sha256 = "5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872",
strip_prefix = "astunparse-1.6.3/lib",
system_build_file = clean_dep("//third_party/systemlibs:astunparse.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
"https://files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
],
)
filegroup_external(
name = "astunparse_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"92fc0e4f4fa9460558eedf3412b988d433a2dcbb3a9c45402a145a4fab8a6ac6": [
"https://storage.googleapis.com/mirror.tensorflow.org/raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
"https://raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
],
},
)
tf_http_archive(
name = "functools32_archive",
build_file = clean_dep("//third_party:functools32.BUILD"),
sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d",
strip_prefix = "functools32-3.2.3-2",
system_build_file = clean_dep("//third_party/systemlibs:functools32.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
"https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
],
)
tf_http_archive(
name = "gast_archive",
build_file = clean_dep("//third_party:gast.BUILD"),
sha256 = "b881ef288a49aa81440d2c5eb8aeefd4c2bb8993d5f50edae7413a85bfdb3b57",
strip_prefix = "gast-0.3.3",
system_build_file = clean_dep("//third_party/systemlibs:gast.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.3.3.tar.gz",
"https://files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.3.3.tar.gz",
],
)
tf_http_archive(
name = "termcolor_archive",
build_file = clean_dep("//third_party:termcolor.BUILD"),
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
"https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
],
)
tf_http_archive(
name = "typing_extensions_archive",
build_file = clean_dep("//third_party:typing_extensions.BUILD"),
sha256 = "79ee589a3caca649a9bfd2a8de4709837400dfa00b6cc81962a1e6a1815969ae",
strip_prefix = "typing_extensions-3.7.4.2/src_py3",
system_build_file = clean_dep("//third_party/systemlibs:typing_extensions.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz",
"https://files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz",
],
)
filegroup_external(
name = "typing_extensions_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"ff17ce94e102024deb68773eb1cc74ca76da4e658f373531f0ac22d68a6bb1ad": [
"http://mirror.tensorflow.org/raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE",
"https://raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE",
],
},
)
tf_http_archive(
name = "opt_einsum_archive",
build_file = clean_dep("//third_party:opt_einsum.BUILD"),
sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b",
strip_prefix = "opt_einsum-2.3.2",
system_build_file = clean_dep("//third_party/systemlibs:opt_einsum.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
"https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
],
)
tf_http_archive(
name = "absl_py",
sha256 = "603febc9b95a8f2979a7bdb77d2f5e4d9b30d4e0d59579f88eba67d4e4cc5462",
strip_prefix = "abseil-py-pypi-v0.9.0",
system_build_file = clean_dep("//third_party/systemlibs:absl_py.BUILD"),
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
"//third_party/systemlibs:absl_py.absl.logging.BUILD": "absl/logging/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
],
)
tf_http_archive(
name = "enum34_archive",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
"https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
],
sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
build_file = clean_dep("//third_party:enum34.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:enum34.BUILD"),
strip_prefix = "enum34-1.1.6/enum",
)
tf_http_archive(
name = "org_python_pypi_backports_weakref",
build_file = clean_dep("//third_party:backports_weakref.BUILD"),
sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
strip_prefix = "backports.weakref-1.0rc1/src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
"https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
],
)
tf_http_archive(
name = "dill_archive",
build_file = clean_dep("//third_party:dill.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:dill.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/e2/96/518a8ea959a734b70d2e95fef98bcbfdc7adad1c1e5f5dd9148c835205a5/dill-0.3.2.zip",
"https://files.pythonhosted.org/packages/e2/96/518a8ea959a734b70d2e95fef98bcbfdc7adad1c1e5f5dd9148c835205a5/dill-0.3.2.zip",
],
sha256 = "6e12da0d8e49c220e8d6e97ee8882002e624f1160289ce85ec2cc0a5246b3a2e",
strip_prefix = "dill-0.3.2",
)
tf_http_archive(
name = "tblib_archive",
build_file = clean_dep("//third_party:tblib.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:tblib.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/d3/41/901ef2e81d7b1e834b9870d416cb09479e175a2be1c4aa1a9dcd0a555293/tblib-1.7.0.tar.gz",
"https://files.pythonhosted.org/packages/d3/41/901ef2e81d7b1e834b9870d416cb09479e175a2be1c4aa1a9dcd0a555293/tblib-1.7.0.tar.gz",
],
sha256 = "059bd77306ea7b419d4f76016aef6d7027cc8a0785579b5aad198803435f882c",
strip_prefix = "tblib-1.7.0",
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [
"https://storage.googleapis.com/mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt",
"https://docs.python.org/2.7/_sources/license.rst.txt",
],
},
)
tf_http_archive(
name = "com_google_protobuf",
patch_file = clean_dep("//third_party/protobuf:protobuf.patch"),
sha256 = "cfcba2df10feec52a84208693937c17a4b5df7775e1635c1e3baffc487b24c9b",
strip_prefix = "protobuf-3.9.2",
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
"https://github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
],
)
tf_http_archive(
name = "nsync",
sha256 = "caf32e6b3d478b78cff6c2ba009c3400f8251f646804bcb65465666a9cea93c4",
strip_prefix = "nsync-1.22.0",
system_build_file = clean_dep("//third_party/systemlibs:nsync.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/nsync/archive/1.22.0.tar.gz",
"https://github.com/google/nsync/archive/1.22.0.tar.gz",
],
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
"https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
],
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
"https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
)
tf_http_archive(
name = "pcre",
build_file = clean_dep("//third_party:pcre.BUILD"),
sha256 = "aecafd4af3bd0f3935721af77b889d9024b2e01d96b58471bd91a3063fb47728",
strip_prefix = "pcre-8.44",
system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.44.tar.gz",
"https://ftp.exim.org/pub/pcre/pcre-8.44.tar.gz",
],
)
tf_http_archive(
name = "curl",
build_file = clean_dep("//third_party:curl.BUILD"),
sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98",
strip_prefix = "curl-7.69.1",
system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.69.1.tar.gz",
"https://curl.haxx.se/download/curl-7.69.1.tar.gz",
],
)
# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "com_github_grpc_grpc",
sha256 = "b956598d8cbe168b5ee717b5dafa56563eb5201a947856a6688bbeac9cac4e1f",
strip_prefix = "grpc-b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd",
system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
patch_file = clean_dep("//third_party/grpc:generate_cc_env_fix.patch"),
system_link_files = {
"//third_party/systemlibs:BUILD": "bazel/BUILD",
"//third_party/systemlibs:grpc.BUILD": "src/compiler/BUILD",
"//third_party/systemlibs:grpc.bazel.grpc_deps.bzl": "bazel/grpc_deps.bzl",
"//third_party/systemlibs:grpc.bazel.grpc_extra_deps.bzl": "bazel/grpc_extra_deps.bzl",
"//third_party/systemlibs:grpc.bazel.cc_grpc_library.bzl": "bazel/cc_grpc_library.bzl",
"//third_party/systemlibs:grpc.bazel.generate_cc.bzl": "bazel/generate_cc.bzl",
"//third_party/systemlibs:grpc.bazel.protobuf.bzl": "bazel/protobuf.bzl",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
"https://github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
],
)
tf_http_archive(
name = "linenoise",
build_file = clean_dep("//third_party:linenoise.BUILD"),
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
)
# Check out LLVM and MLIR from llvm-project.
LLVM_COMMIT = "d553243fe4b5e1992c07aff7b54b16160a4d5e97"
LLVM_SHA256 = "46b06b63414c21d86d8a91e9011f07dd974e976bbda767af66ec77c7d764f091"
LLVM_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
]
tf_http_archive(
name = "llvm-project",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = LLVM_URLS,
additional_build_files = {
clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"): "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
# Intel openMP that is part of LLVM sources.
tf_http_archive(
name = "llvm_openmp",
build_file = clean_dep("//third_party/llvm_openmp:BUILD"),
sha256 = "d19f728c8e04fb1e94566c8d76aef50ec926cd2f95ef3bf1e0a5de4909b28b44",
strip_prefix = "openmp-10.0.1.src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz",
"https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz",
],
)
tf_http_archive(
name = "lmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
"https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
],
)
tf_http_archive(
name = "jsoncpp_git",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
sha256 = "77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0",
strip_prefix = "jsoncpp-1.9.2",
system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
],
)
tf_http_archive(
name = "boringssl",
sha256 = "a9c3b03657d507975a32732f04563132b4553c20747cec6dc04de475c8bdf29f",
strip_prefix = "boringssl-80ca9f9f6ece29ab132cce4cf807a9465a18cfac",
system_build_file = clean_dep("//third_party/systemlibs:boringssl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
"https://github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
],
)
tf_http_archive(
name = "zlib",
build_file = clean_dep("//third_party:zlib.BUILD"),
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
)
tf_http_archive(
name = "fft2d",
build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
sha256 = "5f4dabc2ae21e1f537425d58a49cdca1c49ea11db0d6271e2a4b27e9697548eb",
strip_prefix = "OouraFFT-1.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/petewarden/OouraFFT/archive/v1.0.tar.gz",
"https://github.com/petewarden/OouraFFT/archive/v1.0.tar.gz",
],
)
tf_http_archive(
name = "snappy",
build_file = clean_dep("//third_party:snappy.BUILD"),
sha256 = "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f",
strip_prefix = "snappy-1.1.8",
system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/snappy/archive/1.1.8.tar.gz",
"https://github.com/google/snappy/archive/1.1.8.tar.gz",
],
)
tf_http_archive(
name = "nccl_archive",
build_file = clean_dep("//third_party:nccl/archive.BUILD"),
patch_file = clean_dep("//third_party/nccl:archive.patch"),
sha256 = "b8eaed1fb2d0cc2f951625dc4e17185bab9ff3ab188ba4d34a6e3a01ce9f0d57",
strip_prefix = "nccl-195232556936b39b01cc908296e1650b80d4a3e9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/195232556936b39b01cc908296e1650b80d4a3e9.tar.gz",
"https://github.com/nvidia/nccl/archive/195232556936b39b01cc908296e1650b80d4a3e9.tar.gz",
],
)
    # Java dependencies fetched as prebuilt JARs from Maven (mirror first).
    # The ones marked testonly_ may only be depended on by test targets.
    java_import_external(
        name = "junit",
        jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
        jar_urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
            "https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
            "https://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
        ],
        licenses = ["reciprocal"],  # Common Public License Version 1.0
        testonly_ = True,
        deps = ["@org_hamcrest_core"],
    )
    java_import_external(
        name = "org_hamcrest_core",
        jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
        jar_urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
            "https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
            "https://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
        ],
        licenses = ["notice"],  # New BSD License
        testonly_ = True,
    )
    java_import_external(
        name = "com_google_testing_compile",
        jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
        jar_urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
            "https://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
        ],
        licenses = ["notice"],  # New BSD License
        testonly_ = True,
        deps = ["@com_google_guava", "@com_google_truth"],
    )
    java_import_external(
        name = "com_google_truth",
        jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
        jar_urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
            "https://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
        testonly_ = True,
        deps = ["@com_google_guava"],
    )
    java_import_external(
        name = "org_checkerframework_qual",
        jar_sha256 = "d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894",
        jar_urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
            "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
    )
    java_import_external(
        name = "com_squareup_javapoet",
        jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
        jar_urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
            "https://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
        ],
        licenses = ["notice"],  # Apache 2.0
    )
    # pprof profile-format sources, pinned to a commit.
    tf_http_archive(
        name = "com_google_pprof",
        build_file = clean_dep("//third_party:pprof.BUILD"),
        sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
        strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
            "https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
        ],
    )
    # The CUDA 11 toolkit ships with CUB. We should be able to delete this rule
    # once TF drops support for CUDA 10.
    tf_http_archive(
        name = "cub_archive",
        build_file = clean_dep("//third_party:cub.BUILD"),
        sha256 = "162514b3cc264ac89d91898b58450190b8192e2af1142cf8ccac2d59aa160dda",
        strip_prefix = "cub-1.9.9",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.9.9.zip",
            "https://github.com/NVlabs/cub/archive/1.9.9.zip",
        ],
    )
    # Cython sources; the shipped BUILD.bazel is deleted so our own build file applies.
    tf_http_archive(
        name = "cython",
        build_file = clean_dep("//third_party:cython.BUILD"),
        delete = ["BUILD.bazel"],
        sha256 = "e2e38e1f0572ca54d6085df3dec8b607d20e81515fb80215aed19c81e8fe2079",
        strip_prefix = "cython-0.29.21",
        system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/cython/cython/archive/0.29.21.tar.gz",
            "https://github.com/cython/cython/archive/0.29.21.tar.gz",
        ],
    )
    # NEON-to-SSE intrinsics translation header, pinned to a commit.
    tf_http_archive(
        name = "arm_neon_2_x86_sse",
        build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
        sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
        strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
            "https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
        ],
    )
    # double-conversion library, pinned to a commit; system override supported.
    tf_http_archive(
        name = "double_conversion",
        build_file = clean_dep("//third_party:double_conversion.BUILD"),
        sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
        strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
        system_build_file = clean_dep("//third_party/systemlibs:double_conversion.BUILD"),
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
            "https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
        ],
    )
    # Pretrained TFLite model/data archives used by tests and examples.
    # NOTE(review): both URL entries below are identical, so there is no
    # independent mirror fallback for this archive (same for the quant model).
    tf_http_archive(
        name = "tflite_mobilenet_float",
        build_file = clean_dep("//third_party:tflite_mobilenet_float.BUILD"),
        sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
        urls = [
            "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
            "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
        ],
    )
    tf_http_archive(
        name = "tflite_mobilenet_quant",
        build_file = clean_dep("//third_party:tflite_mobilenet_quant.BUILD"),
        sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
        urls = [
            "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
            "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
        ],
    )
    tf_http_archive(
        name = "tflite_mobilenet_ssd",
        build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
        sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
            "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
        ],
    )
    tf_http_archive(
        name = "tflite_mobilenet_ssd_quant",
        build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
        sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
            "https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
        ],
    )
    tf_http_archive(
        name = "tflite_mobilenet_ssd_quant_protobuf",
        build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
        sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
        strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
            "https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
        ],
    )
    tf_http_archive(
        name = "tflite_conv_actions_frozen",
        build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
        sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
            "https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
        ],
    )
    tf_http_archive(
        name = "tflite_ovic_testdata",
        build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
        sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2",
        strip_prefix = "ovic",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
            "https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
        ],
    )
    # Bazel rule sets (rules_cc / rules_python / rules_android), pinned by sha256.
    tf_http_archive(
        name = "rules_cc",
        sha256 = "cf3b76a90c86c0554c5b10f4b160f05af71d252026b71362c4674e2fb9936cf9",
        strip_prefix = "rules_cc-01d4a48911d5e7591ecb1c06d3b8af47fe872371",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
            "https://github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
        ],
    )
    tf_http_archive(
        name = "rules_python",
        sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
            "https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
        ],
    )
    tf_http_archive(
        name = "build_bazel_rules_android",
        sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
        strip_prefix = "rules_android-0.1.1",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
            "https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
        ],
    )
    # Apple and Swift rules.
    # https://github.com/bazelbuild/rules_apple/releases
    tf_http_archive(
        name = "build_bazel_rules_apple",
        sha256 = "ee9e6073aeb5a65c100cb9c44b0017c937706a4ae03176e14a7e78620a198079",
        strip_prefix = "rules_apple-5131f3d46794bf227d296c82f30c2499c9de3c5b",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz",
            "https://github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz",
        ],
    )
    # https://github.com/bazelbuild/rules_swift/releases
    tf_http_archive(
        name = "build_bazel_rules_swift",
        sha256 = "d0833bc6dad817a367936a5f902a0c11318160b5e80a20ece35fb85a5675c886",
        strip_prefix = "rules_swift-3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz",
            "https://github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz",
        ],
    )
    # https://github.com/bazelbuild/apple_support/releases
    tf_http_archive(
        name = "build_bazel_apple_support",
        sha256 = "ad8ae80e93612b8151019367a3d1604d7a51c14480dae1254e10252007e8260c",
        strip_prefix = "apple_support-501b4afb27745c4813a88ffa28acd901408014e4",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz",
            "https://github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz",
        ],
    )
    # https://github.com/bazelbuild/bazel-skylib/releases
    tf_http_archive(
        name = "bazel_skylib",
        sha256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz",
            "https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz",
        ],
    )
    # https://github.com/apple/swift-protobuf/releases
    tf_http_archive(
        name = "com_github_apple_swift_swift_protobuf",
        strip_prefix = "swift-protobuf-1.6.0/",
        sha256 = "4ccf6e5ea558e8287bf6331f9f6e52b3c321fca5f1d181d03680f415c32a6bba",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/apple/swift-protobuf/archive/1.6.0.zip",
            "https://github.com/apple/swift-protobuf/archive/1.6.0.zip",
        ],
    )
    # https://github.com/google/xctestrunner/releases
    # Single executable file (not an archive), hence http_file.
    http_file(
        name = "xctestrunner",
        executable = 1,
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
            "https://github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
        ],
    )
    # nlohmann/json single-header JSON library.
    tf_http_archive(
        name = "nlohmann_json_lib",
        build_file = clean_dep("//third_party:nlohmann_json.BUILD"),
        sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
        strip_prefix = "json-3.4.0",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz",
            "https://github.com/nlohmann/json/archive/v3.4.0.tar.gz",
        ],
    )
    # pybind11 C++/Python binding library; system override supported.
    tf_http_archive(
        name = "pybind11",
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/pybind/pybind11/archive/v2.6.0.tar.gz",
            "https://github.com/pybind/pybind11/archive/v2.6.0.tar.gz",
        ],
        sha256 = "90b705137b69ee3b5fc655eaca66d0dc9862ea1759226f7ccd3098425ae69571",
        strip_prefix = "pybind11-2.6.0",
        build_file = clean_dep("//third_party:pybind11.BUILD"),
        system_build_file = clean_dep("//third_party/systemlibs:pybind11.BUILD"),
    )
    # wrapt; strip_prefix points directly at the package sources inside the repo.
    tf_http_archive(
        name = "wrapt",
        build_file = clean_dep("//third_party:wrapt.BUILD"),
        sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51",
        strip_prefix = "wrapt-1.11.1/src/wrapt",
        system_build_file = clean_dep("//third_party/systemlibs:wrapt.BUILD"),
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
            "https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
        ],
    )
    # NOTE(review): mirror URL below is plain http; integrity is still
    # guaranteed by the sha256 pin.
    tf_http_archive(
        name = "coremltools",
        sha256 = "0d594a714e8a5fd5bd740ad112ef59155c0482e25fdc8f8efa5758f90abdcf1e",
        strip_prefix = "coremltools-3.3",
        build_file = clean_dep("//third_party:coremltools.BUILD"),
        urls = [
            "http://mirror.tensorflow.org/github.com/apple/coremltools/archive/3.3.zip",
            "https://github.com/apple/coremltools/archive/3.3.zip",
        ],
    )
    tf_http_archive(
        name = "tf_toolchains",
        sha256 = "eb175afa73e5a33d2b5d2aabcfde6c8c3395fd7001eb5ba765a5cd98cce714ba",
        strip_prefix = "toolchains-0.0.2",
        build_file = clean_dep("//third_party:tf_toolchains.BUILD"),
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/toolchains/archive/v0.0.2.tar.gz",
            "https://github.com/tensorflow/toolchains/archive/v0.0.2.tar.gz",
        ],
    )
def tf_bind():
    """Bind targets for some external repositories.

    Each ``native.bind`` makes its ``actual`` label reachable as
    ``//external:<name>``.
    """
    ##############################################################################
    # BIND DEFINITIONS
    #
    # Please do not add bind() definitions unless we have no other choice.
    # If that ends up being the case, please leave a comment explaining
    # why we can't depend on the canonical build target.
    # Needed by Protobuf
    native.bind(
        name = "grpc_cpp_plugin",
        actual = "@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin",
    )
    native.bind(
        name = "grpc_python_plugin",
        actual = "@com_github_grpc_grpc//src/compiler:grpc_python_plugin",
    )
    native.bind(
        name = "grpc_lib",
        actual = "@com_github_grpc_grpc//:grpc++",
    )
    native.bind(
        name = "grpc_lib_unsecure",
        actual = "@com_github_grpc_grpc//:grpc++_unsecure",
    )
    # Needed by Protobuf
    native.bind(
        name = "python_headers",
        actual = clean_dep("//third_party/python_runtime:headers"),
    )
    # Needed by Protobuf
    native.bind(
        name = "six",
        actual = "@six_archive//:six",
    )
|
import warnings
from typing import Dict, Union, Optional, List
import pytorch_lightning as pl
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import nni
from ...graph import Evaluator
from ...serializer import serialize_cls
__all__ = ['LightningModule', 'Trainer', 'DataLoader', 'Lightning', 'Classification', 'Regression']
class LightningModule(pl.LightningModule):
    """PyTorch Lightning module extended with a ``set_model`` hook.

    The evaluator uses ``set_model`` to inject the candidate model into this
    module before training starts.
    """

    def set_model(self, model):
        # Accept either a model class (instantiate it) or a ready-built instance.
        self.model = model() if isinstance(model, type) else model
# Serializable drop-in replacements. Users should import ``Trainer`` and
# ``DataLoader`` from this module (see ``Lightning.__init__`` asserts) so the
# constructor arguments can be recorded via ``serialize_cls``.
Trainer = serialize_cls(pl.Trainer)
DataLoader = serialize_cls(DataLoader)
class Lightning(Evaluator):
    """
    Delegate the whole training to PyTorch Lightning.

    Since the arguments passed to the initialization needs to be serialized, ``LightningModule``, ``Trainer`` or
    ``DataLoader`` in this file should be used. Another option is to hide dataloader in the Lightning module, in
    which case, dataloaders are not required for this class to work.

    Following the programming style of Lightning, metrics sent to NNI should be obtained from ``callback_metrics``
    in trainer. Two hooks are added at the end of validation epoch and the end of ``fit``, respectively. The metric name
    and type depend on the specific task.

    Parameters
    ----------
    lightning_module : LightningModule
        Lightning module that defines the training logic.
    trainer : Trainer
        Lightning trainer that handles the training.
    train_dataloader : DataLoader
        Used in ``trainer.fit()``. A PyTorch DataLoader with training samples.
        If the ``lightning_module`` has a predefined train_dataloader method this will be skipped.
    val_dataloaders : DataLoader or List of DataLoader
        Used in ``trainer.fit()``. Either a single PyTorch Dataloader or a list of them, specifying validation samples.
        If the ``lightning_module`` has a predefined val_dataloaders method this will be skipped.
    """

    def __init__(self, lightning_module: LightningModule, trainer: Trainer,
                 train_dataloader: Optional[DataLoader] = None,
                 val_dataloaders: Union[DataLoader, List[DataLoader], None] = None):
        assert isinstance(lightning_module, LightningModule), f'Lightning module must be an instance of {__name__}.LightningModule.'
        assert isinstance(trainer, Trainer), f'Trainer must be imported from {__name__}.'
        assert _check_dataloader(train_dataloader), f'Wrong dataloader type. Try import DataLoader from {__name__}.'
        assert _check_dataloader(val_dataloaders), f'Wrong dataloader type. Try import DataLoader from {__name__}.'
        self.module = lightning_module
        self.trainer = trainer
        self.train_dataloader = train_dataloader
        self.val_dataloaders = val_dataloaders

    @staticmethod
    def _load(ir):
        # Reconstruct an evaluator from the dict produced by ``_dump``.
        return Lightning(ir['module'], ir['trainer'], ir['train_dataloader'], ir['val_dataloaders'])

    def _dump(self):
        # Serializable representation; keys must mirror ``_load``.
        return {
            'module': self.module,
            'trainer': self.trainer,
            'train_dataloader': self.train_dataloader,
            'val_dataloaders': self.val_dataloaders
        }

    def _execute(self, model_cls):
        return self.fit(model_cls)

    def __eq__(self, other):
        # BUG FIX: the previous implementation compared ``self.function`` and
        # ``self.arguments``, attributes that are never set on this class (they
        # belong to a function-based evaluator), so any equality test raised
        # AttributeError. Compare the attributes this evaluator actually stores.
        if not isinstance(other, Lightning):
            return NotImplemented
        return (self.module == other.module
                and self.trainer == other.trainer
                and self.train_dataloader == other.train_dataloader
                and self.val_dataloaders == other.val_dataloaders)

    def fit(self, model):
        """
        Fit the model with provided dataloader, with Lightning trainer.

        Parameters
        ----------
        model : nn.Module
            The model to fit.
        """
        self.module.set_model(model)
        return self.trainer.fit(self.module, self.train_dataloader, self.val_dataloaders)
def _check_dataloader(dataloader):
if dataloader is None:
return True
if isinstance(dataloader, list):
return all([_check_dataloader(d) for d in dataloader])
return isinstance(dataloader, DataLoader)
### The following are some commonly used Lightning modules ###
class _SupervisedLearningModule(LightningModule):
    """Shared Lightning module for supervised tasks.

    Wires together a criterion class, a dict of metric classes, and an
    optimizer class; reports validation metrics to NNI after every validation
    epoch (intermediate) and at the end of fitting (final).
    """

    def __init__(self, criterion: nn.Module, metrics: Dict[str, pl.metrics.Metric],
                 learning_rate: float = 0.001,
                 weight_decay: float = 0.,
                 optimizer: optim.Optimizer = optim.Adam):
        super().__init__()
        # Let Lightning record the constructor arguments (exposed as ``self.hparams``).
        self.save_hyperparameters('criterion', 'optimizer', 'learning_rate', 'weight_decay')
        self.criterion = criterion()
        self.optimizer = optimizer
        # Instantiate every metric class and register it as a submodule.
        self.metrics = nn.ModuleDict({metric_name: metric_cls() for metric_name, metric_cls in metrics.items()})

    def forward(self, x):
        return self.model(x)

    def _step(self, batch, prefix):
        # Shared train/val/test logic: forward pass, loss, metric logging.
        x, y = batch
        y_hat = self(x)
        loss = self.criterion(y_hat, y)
        self.log(prefix + 'loss', loss, prog_bar=True)
        for metric_name, metric in self.metrics.items():
            self.log(prefix + metric_name, metric(y_hat, y), prog_bar=True)
        return loss

    def training_step(self, batch, batch_idx):
        return self._step(batch, 'train_')

    def validation_step(self, batch, batch_idx):
        self._step(batch, 'val_')

    def test_step(self, batch, batch_idx):
        self._step(batch, 'test_')

    def configure_optimizers(self):
        return self.optimizer(self.parameters(), lr=self.hparams.learning_rate,
                              weight_decay=self.hparams.weight_decay)

    def on_validation_epoch_end(self):
        nni.report_intermediate_result(self._get_validation_metrics())

    def teardown(self, stage):
        if stage == 'fit':
            nni.report_final_result(self._get_validation_metrics())

    def _get_validation_metrics(self):
        # A single metric is reported as a plain scalar; multiple metrics are
        # reported as a name -> value dict (with a warning, since the framework
        # expects a "default" entry it cannot find here).
        if len(self.metrics) == 1:
            metric_name = next(iter(self.metrics))
            return self.trainer.callback_metrics['val_' + metric_name].item()
        warnings.warn('Multiple metrics without "default" is not supported by current framework.')
        return {name: self.trainer.callback_metrics['val_' + name].item() for name in self.metrics}
@serialize_cls
class _ClassificationModule(_SupervisedLearningModule):
    """Supervised-learning module preconfigured for classification (accuracy metric)."""

    def __init__(self, criterion: nn.Module = nn.CrossEntropyLoss,
                 learning_rate: float = 0.001,
                 weight_decay: float = 0.,
                 optimizer: optim.Optimizer = optim.Adam):
        metrics = {'acc': pl.metrics.Accuracy}
        super().__init__(criterion, metrics, learning_rate=learning_rate,
                         weight_decay=weight_decay, optimizer=optimizer)
class Classification(Lightning):
    """
    Evaluator for classification tasks.

    Parameters
    ----------
    criterion : nn.Module
        Class for criterion module (not an instance). default: ``nn.CrossEntropyLoss``
    learning_rate : float
        Learning rate. default: 0.001
    weight_decay : float
        L2 weight decay. default: 0
    optimizer : Optimizer
        Class for optimizer (not an instance). default: ``Adam``
    train_dataloader : DataLoader
        Used in ``trainer.fit()``. A PyTorch DataLoader with training samples.
        If the ``lightning_module`` has a predefined train_dataloader method this will be skipped.
    val_dataloaders : DataLoader or List of DataLoader
        Used in ``trainer.fit()``. Either a single PyTorch Dataloader or a list of them, specifying validation samples.
        If the ``lightning_module`` has a predefined val_dataloaders method this will be skipped.
    trainer_kwargs : dict
        Optional keyword arguments passed to trainer. See
        `Lightning documentation <https://pytorch-lightning.readthedocs.io/en/stable/trainer.html>`__ for details.
    """

    def __init__(self, criterion: nn.Module = nn.CrossEntropyLoss,
                 learning_rate: float = 0.001,
                 weight_decay: float = 0.,
                 optimizer: optim.Optimizer = optim.Adam,
                 train_dataloader: Optional[DataLoader] = None,
                 val_dataloaders: Union[DataLoader, List[DataLoader], None] = None,
                 **trainer_kwargs):
        trainer = Trainer(**trainer_kwargs)
        module = _ClassificationModule(criterion=criterion,
                                       learning_rate=learning_rate,
                                       weight_decay=weight_decay,
                                       optimizer=optimizer)
        super().__init__(module, trainer,
                         train_dataloader=train_dataloader, val_dataloaders=val_dataloaders)
@serialize_cls
class _RegressionModule(_SupervisedLearningModule):
    """Supervised-learning module preconfigured for regression (MSE metric)."""

    def __init__(self, criterion: nn.Module = nn.MSELoss,
                 learning_rate: float = 0.001,
                 weight_decay: float = 0.,
                 optimizer: optim.Optimizer = optim.Adam):
        metrics = {'mse': pl.metrics.MeanSquaredError}
        super().__init__(criterion, metrics, learning_rate=learning_rate,
                         weight_decay=weight_decay, optimizer=optimizer)
class Regression(Lightning):
    """
    Evaluator for regression tasks.

    Parameters
    ----------
    criterion : nn.Module
        Class for criterion module (not an instance). default: ``nn.MSELoss``
    learning_rate : float
        Learning rate. default: 0.001
    weight_decay : float
        L2 weight decay. default: 0
    optimizer : Optimizer
        Class for optimizer (not an instance). default: ``Adam``
    train_dataloader : DataLoader
        Used in ``trainer.fit()``. A PyTorch DataLoader with training samples.
        If the ``lightning_module`` has a predefined train_dataloader method this will be skipped.
    val_dataloaders : DataLoader or List of DataLoader
        Used in ``trainer.fit()``. Either a single PyTorch Dataloader or a list of them, specifying validation samples.
        If the ``lightning_module`` has a predefined val_dataloaders method this will be skipped.
    trainer_kwargs : dict
        Optional keyword arguments passed to trainer. See
        `Lightning documentation <https://pytorch-lightning.readthedocs.io/en/stable/trainer.html>`__ for details.
    """

    def __init__(self, criterion: nn.Module = nn.MSELoss,
                 learning_rate: float = 0.001,
                 weight_decay: float = 0.,
                 optimizer: optim.Optimizer = optim.Adam,
                 train_dataloader: Optional[DataLoader] = None,
                 val_dataloaders: Union[DataLoader, List[DataLoader], None] = None,
                 **trainer_kwargs):
        trainer = Trainer(**trainer_kwargs)
        module = _RegressionModule(criterion=criterion,
                                   learning_rate=learning_rate,
                                   weight_decay=weight_decay,
                                   optimizer=optimizer)
        super().__init__(module, trainer,
                         train_dataloader=train_dataloader, val_dataloaders=val_dataloaders)
|
# coding: utf-8
from __future__ import unicode_literals
import functools
import itertools
import operator
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_request,
)
from .openload import PhantomJSwrapper
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
merge_dicts,
NO_DEFAULT,
orderedSet,
remove_quotes,
str_to_int,
update_url_query,
urlencode_postdata,
url_or_none,
)
class PornHubBaseIE(InfoExtractor):
    # netrc machine name used for credential lookup
    _NETRC_MACHINE = 'pornhub'
    def _download_webpage_handle(self, *args, **kwargs):
        # Wrap the base downloader: if the response looks like the site's
        # JS-based anti-bot interstitial, render the page through PhantomJS
        # (which executes the JS and stores the resulting cookies) and then
        # retry the download once.
        def dl(*args, **kwargs):
            return super(PornHubBaseIE, self)._download_webpage_handle(*args, **kwargs)
        ret = dl(*args, **kwargs)
        if not ret:
            return ret
        webpage, urlh = ret
        # Markers of the anti-bot page: onload=go(), RNKEY cookie setter,
        # or a forced location reload.
        if any(re.search(p, webpage) for p in (
                r'<body\b[^>]+\bonload=["\']go\(\)',
                r'document\.cookie\s*=\s*["\']RNKEY=',
                r'document\.location\.reload\(true\)')):
            url_or_request = args[0]
            url = (url_or_request.get_full_url()
                   if isinstance(url_or_request, compat_urllib_request.Request)
                   else url_or_request)
            phantom = PhantomJSwrapper(self, required_version='2.0')
            phantom.get(url, html=webpage)
            webpage, urlh = dl(*args, **kwargs)
        return webpage, urlh
    def _real_initialize(self):
        # Login is performed lazily per host in ``_login``.
        self._logged_in = False
    def _login(self, host):
        """Log in to *host* once per extractor instance; no-op without credentials."""
        if self._logged_in:
            return
        site = host.split('.')[0]
        # Both sites pornhub and pornhubpremium have separate accounts
        # so there should be an option to provide credentials for both.
        # At the same time some videos are available under the same video id
        # on both sites so that we have to identify them as the same video.
        # For that purpose we have to keep both in the same extractor
        # but under different netrc machines.
        username, password = self._get_login_info(netrc_machine=site)
        if username is None:
            return
        login_url = 'https://www.%s/%slogin' % (host, 'premium/' if 'premium' in host else '')
        login_page = self._download_webpage(
            login_url, None, 'Downloading %s login page' % site)
        def is_logged(webpage):
            # A sign-out control on the page means the session is authenticated.
            return any(re.search(p, webpage) for p in (
                r'class=["\']signOut',
                r'>Sign\s+[Oo]ut\s*<'))
        if is_logged(login_page):
            self._logged_in = True
            return
        # Carry over hidden form fields (tokens etc.) and add credentials.
        login_form = self._hidden_inputs(login_page)
        login_form.update({
            'username': username,
            'password': password,
        })
        response = self._download_json(
            'https://www.%s/front/authenticate' % host, None,
            'Logging in to %s' % site,
            data=urlencode_postdata(login_form),
            headers={
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'Referer': login_url,
                'X-Requested-With': 'XMLHttpRequest',
            })
        if response.get('success') == '1':
            self._logged_in = True
            return
        message = response.get('message')
        if message is not None:
            raise ExtractorError(
                'Unable to login: %s' % message, expected=True)
        raise ExtractorError('Unable to log in')
class PornHubIE(PornHubBaseIE):
    IE_DESC = 'PornHub and Thumbzilla'
    # Matches pornhub.{com,net,org} (including the premium subdomain) watch and
    # embed pages, plus thumbzilla.com video pages; the video id is a run of
    # digits and lowercase letters.
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
                            (?:www\.)?thumbzilla\.com/video/
                        )
                        (?P<id>[\da-z]+)
                    '''
_TESTS = [{
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
'md5': 'a6391306d050e4547f62b3f485dd9ba9',
'info_dict': {
'id': '648719015',
'ext': 'mp4',
'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
'uploader': 'Babes',
'upload_date': '20130628',
'timestamp': 1372447216,
'duration': 361,
'view_count': int,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 18,
'tags': list,
'categories': list,
},
}, {
# non-ASCII title
'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
'info_dict': {
'id': '1331683002',
'ext': 'mp4',
'title': '重庆婷婷女王足交',
'upload_date': '20150213',
'timestamp': 1423804862,
'duration': 1753,
'view_count': int,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 18,
'tags': list,
'categories': list,
},
'params': {
'skip_download': True,
},
'skip': 'Video has been flagged for verification in accordance with our trust and safety policy',
}, {
# subtitles
'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5af5fef7c2aa7',
'info_dict': {
'id': 'ph5af5fef7c2aa7',
'ext': 'mp4',
'title': 'BFFS - Cute Teen Girls Share Cock On the Floor',
'uploader': 'BFFs',
'duration': 622,
'view_count': int,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 18,
'tags': list,
'categories': list,
'subtitles': {
'en': [{
"ext": 'srt'
}]
},
},
'params': {
'skip_download': True,
},
'skip': 'This video has been disabled',
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
'only_matching': True,
}, {
# removed at the request of cam4.com
'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
'only_matching': True,
}, {
# removed at the request of the copyright owner
'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
'only_matching': True,
}, {
# removed by uploader
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
'only_matching': True,
}, {
# private video
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
'only_matching': True,
}, {
'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
'only_matching': True,
}, {
'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
'only_matching': True,
}, {
'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933',
'only_matching': True,
}, {
'url': 'https://www.pornhub.org/view_video.php?viewkey=203640933',
'only_matching': True,
}, {
'url': 'https://www.pornhubpremium.com/view_video.php?viewkey=ph5e4acdae54a82',
'only_matching': True,
}, {
# Some videos are available with the same id on both premium
# and non-premium sites (e.g. this and the following test)
'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5f75b0f4b18e3',
'only_matching': True,
}, {
'url': 'https://www.pornhubpremium.com/view_video.php?viewkey=ph5f75b0f4b18e3',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return re.findall(
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub(?:premium)?\.(?:com|net|org)/embed/[\da-z]+)',
webpage)
def _extract_count(self, pattern, webpage, name):
return str_to_int(self._search_regex(
pattern, webpage, '%s count' % name, fatal=False))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host = mobj.group('host') or 'pornhub.com'
video_id = mobj.group('id')
self._login(host)
self._set_cookie(host, 'age_verified', '1')
def dl_webpage(platform):
self._set_cookie(host, 'platform', platform)
return self._download_webpage(
'https://www.%s/view_video.php?viewkey=%s' % (host, video_id),
video_id, 'Downloading %s webpage' % platform)
webpage = dl_webpage('pc')
error_msg = self._html_search_regex(
(r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
r'(?s)<section[^>]+class=["\']noVideo["\'][^>]*>(?P<error>.+?)</section>'),
webpage, 'error message', default=None, group='error')
if error_msg:
error_msg = re.sub(r'\s+', ' ', error_msg)
raise ExtractorError(
'PornHub said: %s' % error_msg,
expected=True, video_id=video_id)
# video_title from flashvars contains whitespace instead of non-ASCII (see
# http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
# on that anymore.
title = self._html_search_meta(
'twitter:title', webpage, default=None) or self._html_search_regex(
(r'(?s)<h1[^>]+class=["\']title["\'][^>]*>(?P<title>.+?)</h1>',
r'<div[^>]+data-video-title=(["\'])(?P<title>(?:(?!\1).)+)\1',
r'shareTitle["\']\s*[=:]\s*(["\'])(?P<title>(?:(?!\1).)+)\1'),
webpage, 'title', group='title')
video_urls = []
video_urls_set = set()
subtitles = {}
flashvars = self._parse_json(
self._search_regex(
r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
video_id)
if flashvars:
subtitle_url = url_or_none(flashvars.get('closedCaptionsFile'))
if subtitle_url:
subtitles.setdefault('en', []).append({
'url': subtitle_url,
'ext': 'srt',
})
thumbnail = flashvars.get('image_url')
duration = int_or_none(flashvars.get('video_duration'))
media_definitions = flashvars.get('mediaDefinitions')
if isinstance(media_definitions, list):
for definition in media_definitions:
if not isinstance(definition, dict):
continue
video_url = definition.get('videoUrl')
if not video_url or not isinstance(video_url, compat_str):
continue
if video_url in video_urls_set:
continue
video_urls_set.add(video_url)
video_urls.append(
(video_url, int_or_none(definition.get('quality'))))
else:
thumbnail, duration = [None] * 2
def extract_js_vars(webpage, pattern, default=NO_DEFAULT):
assignments = self._search_regex(
pattern, webpage, 'encoded url', default=default)
if not assignments:
return {}
assignments = assignments.split(';')
js_vars = {}
def parse_js_value(inp):
inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
if '+' in inp:
inps = inp.split('+')
return functools.reduce(
operator.concat, map(parse_js_value, inps))
inp = inp.strip()
if inp in js_vars:
return js_vars[inp]
return remove_quotes(inp)
for assn in assignments:
assn = assn.strip()
if not assn:
continue
assn = re.sub(r'var\s+', '', assn)
vname, value = assn.split('=', 1)
js_vars[vname] = parse_js_value(value)
return js_vars
def add_video_url(video_url):
v_url = url_or_none(video_url)
if not v_url:
return
if v_url in video_urls_set:
return
video_urls.append((v_url, None))
video_urls_set.add(v_url)
def parse_quality_items(quality_items):
q_items = self._parse_json(quality_items, video_id, fatal=False)
if not isinstance(q_items, list):
return
for item in q_items:
if isinstance(item, dict):
add_video_url(item.get('url'))
if not video_urls:
FORMAT_PREFIXES = ('media', 'quality', 'qualityItems')
js_vars = extract_js_vars(
webpage, r'(var\s+(?:%s)_.+)' % '|'.join(FORMAT_PREFIXES),
default=None)
if js_vars:
for key, format_url in js_vars.items():
if key.startswith(FORMAT_PREFIXES[-1]):
parse_quality_items(format_url)
elif any(key.startswith(p) for p in FORMAT_PREFIXES[:2]):
add_video_url(format_url)
if not video_urls and re.search(
r'<[^>]+\bid=["\']lockedPlayer', webpage):
raise ExtractorError(
'Video %s is locked' % video_id, expected=True)
if not video_urls:
js_vars = extract_js_vars(
dl_webpage('tv'), r'(var.+?mediastring.+?)</script>')
add_video_url(js_vars['mediastring'])
for mobj in re.finditer(
r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
webpage):
video_url = mobj.group('url')
if video_url not in video_urls_set:
video_urls.append((video_url, None))
video_urls_set.add(video_url)
upload_date = None
formats = []
def add_format(format_url, height=None):
ext = determine_ext(format_url)
if ext == 'mpd':
formats.extend(self._extract_mpd_formats(
format_url, video_id, mpd_id='dash', fatal=False))
return
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
return
tbr = None
mobj = re.search(r'(?P<height>\d+)[pP]?_(?P<tbr>\d+)[kK]', format_url)
if mobj:
if not height:
height = int(mobj.group('height'))
tbr = int(mobj.group('tbr'))
formats.append({
'url': format_url,
'format_id': '%dp' % height if height else None,
'height': height,
'tbr': tbr,
})
for video_url, height in video_urls:
if not upload_date:
upload_date = self._search_regex(
r'/(\d{6}/\d{2})/', video_url, 'upload data', default=None)
if upload_date:
upload_date = upload_date.replace('/', '')
if '/video/get_media' in video_url:
medias = self._download_json(video_url, video_id, fatal=False)
if isinstance(medias, list):
for media in medias:
if not isinstance(media, dict):
continue
video_url = url_or_none(media.get('videoUrl'))
if not video_url:
continue
height = int_or_none(media.get('quality'))
add_format(video_url, height)
continue
add_format(video_url)
self._sort_formats(formats)
video_uploader = self._html_search_regex(
r'(?s)From: .+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
webpage, 'uploader', default=None)
def extract_vote_count(kind, name):
return self._extract_count(
(r'<span[^>]+\bclass="votes%s"[^>]*>([\d,\.]+)</span>' % kind,
r'<span[^>]+\bclass=["\']votes%s["\'][^>]*\bdata-rating=["\'](\d+)' % kind),
webpage, name)
view_count = self._extract_count(
r'<span class="count">([\d,\.]+)</span> [Vv]iews', webpage, 'view')
like_count = extract_vote_count('Up', 'like')
dislike_count = extract_vote_count('Down', 'dislike')
comment_count = self._extract_count(
r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
def extract_list(meta_key):
div = self._search_regex(
r'(?s)<div[^>]+\bclass=["\'].*?\b%sWrapper[^>]*>(.+?)</div>'
% meta_key, webpage, meta_key, default=None)
if div:
return re.findall(r'<a[^>]+\bhref=[^>]+>([^<]+)', div)
info = self._search_json_ld(webpage, video_id, default={})
# description provided in JSON-LD is irrelevant
info['description'] = None
return merge_dicts({
'id': video_id,
'uploader': video_uploader,
'upload_date': upload_date,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'formats': formats,
'age_limit': 18,
'tags': extract_list('tags'),
'categories': extract_list('categories'),
'subtitles': subtitles,
}, info)
class PornHubPlaylistBaseIE(PornHubBaseIE):
    """Shared helpers for PornHub playlist-style pages (users, channels, models...)."""

    def _extract_page(self, url):
        # Page number from an explicit "?page=N" query parameter, or None.
        return int_or_none(self._search_regex(
            r'\bpage=(\d+)', url, 'page', default=None))

    def _extract_entries(self, webpage, host):
        # Only process container div with main playlist content skipping
        # drop-down menu that uses similar pattern for videos (see
        # https://github.com/ytdl-org/youtube-dl/issues/11594).
        container = self._search_regex(
            r'(?s)(<div[^>]+class=["\']container.+)', webpage,
            'container', default=webpage)
        # Each entry is a view_video.php link paired with its title attribute;
        # orderedSet removes duplicates while preserving page order.
        return [
            self.url_result(
                'http://www.%s/%s' % (host, video_url),
                PornHubIE.ie_key(), video_title=title)
            for video_url, title in orderedSet(re.findall(
                r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
                container))
        ]
class PornHubUserIE(PornHubPlaylistBaseIE):
    """Extractor for user/channel/model/pornstar profile URLs.

    Redirects to the profile's /videos listing, which is handled by
    PornHubPagedVideoListIE.
    """
    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/?#&]+))(?:[?#&]|/(?!videos)|$)'
    _TESTS = [{
        'url': 'https://www.pornhub.com/model/zoe_ph',
        'playlist_mincount': 118,
    }, {
        'url': 'https://www.pornhub.com/pornstar/liz-vicious',
        'info_dict': {
            'id': 'liz-vicious',
        },
        'playlist_mincount': 118,
    }, {
        'url': 'https://www.pornhub.com/users/russianveet69',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/channels/povd',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/model/zoe_ph?abc=1',
        'only_matching': True,
    }, {
        # Unavailable via /videos page, but available with direct pagination
        # on pornstar page (see [1]), requires premium
        # 1. https://github.com/ytdl-org/youtube-dl/issues/27853
        'url': 'https://www.pornhubpremium.com/pornstar/sienna-west',
        'only_matching': True,
    }, {
        # Same as before, multi page
        'url': 'https://www.pornhubpremium.com/pornstar/lily-labeau',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user_id = mobj.group('id')
        videos_url = '%s/videos' % mobj.group('url')
        # Carry over an explicit page number from the original URL so
        # direct pagination (e.g. ?page=3) is preserved.
        page = self._extract_page(url)
        if page:
            videos_url = update_url_query(videos_url, {'page': page})
        return self.url_result(
            videos_url, ie=PornHubPagedVideoListIE.ie_key(), video_id=user_id)
class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
    """Base class for paged listings: walks pages until none remain."""

    @staticmethod
    def _has_more(webpage):
        # Any "next page" marker (pagination item, rel=next link, or the
        # "load more" button) indicates further pages exist.
        return re.search(
            r'''(?x)
                <li[^>]+\bclass=["\']page_next|
                <link[^>]+\brel=["\']next|
                <button[^>]+\bid=["\']moreDataBtn
            ''', webpage) is not None

    def _entries(self, url, host, item_id):
        page = self._extract_page(url)

        VIDEOS = '/videos'

        def download_page(base_url, num, fallback=False):
            note = 'Downloading page %d%s' % (num, ' (switch to fallback)' if fallback else '')
            return self._download_webpage(
                base_url, item_id, note, query={'page': num})

        def is_404(e):
            return isinstance(e.cause, compat_HTTPError) and e.cause.code == 404

        base_url = url
        has_page = page is not None
        first_page = page if has_page else 1
        # If the URL pins a page, yield only that page; otherwise iterate
        # from page 1 until a 404 or until _has_more says stop.
        for page_num in (first_page, ) if has_page else itertools.count(first_page):
            try:
                try:
                    webpage = download_page(base_url, page_num)
                except ExtractorError as e:
                    # Some sources may not be available via /videos page,
                    # trying to fallback to main page pagination (see [1])
                    # 1. https://github.com/ytdl-org/youtube-dl/issues/27853
                    if is_404(e) and page_num == first_page and VIDEOS in base_url:
                        base_url = base_url.replace(VIDEOS, '')
                        webpage = download_page(base_url, page_num, fallback=True)
                    else:
                        raise
            except ExtractorError as e:
                # A 404 past the first page simply means we ran out of pages.
                if is_404(e) and page_num != first_page:
                    break
                raise
            page_entries = self._extract_entries(webpage, host)
            if not page_entries:
                break
            for e in page_entries:
                yield e
            if not self._has_more(webpage):
                break

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        item_id = mobj.group('id')

        # Log in first so premium/fan-only listings paginate correctly.
        self._login(host)

        return self.playlist_result(self._entries(url, host, item_id), item_id)
class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE):
    """Catch-all paged listing extractor (profiles' /videos, categories,
    search, playlists, etc.)."""
    _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?P<id>(?:[^/]+/)*[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.pornhub.com/model/zoe_ph/videos',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos',
        'info_dict': {
            'id': 'pornstar/jenny-blighe/videos',
        },
        'playlist_mincount': 149,
    }, {
        'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos?page=3',
        'info_dict': {
            'id': 'pornstar/jenny-blighe/videos',
        },
        'playlist_mincount': 40,
    }, {
        # default sorting as Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos',
        'info_dict': {
            'id': 'channels/povd/videos',
        },
        'playlist_mincount': 293,
    }, {
        # Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
        'only_matching': True,
    }, {
        # Most Recent Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
        'only_matching': True,
    }, {
        # Most Viewed Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'only_matching': True,
    }, {
        # Most Viewed Videos
        'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=mv',
        'only_matching': True,
    }, {
        # Top Rated Videos
        'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=tr',
        'only_matching': True,
    }, {
        # Longest Videos
        'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=lg',
        'only_matching': True,
    }, {
        # Newest Videos
        'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=cm',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos/paid',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos/fanonly',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/video',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/video?page=3',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/video/search?search=123',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/categories/teen',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/categories/teen?page=3',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/hd',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/hd?page=3',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/described-video',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/described-video?page=2',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/video/incategories/60fps-1/hd-porn',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/playlist/44121572',
        'info_dict': {
            'id': 'playlist/44121572',
        },
        'playlist_mincount': 132,
    }, {
        'url': 'https://www.pornhub.com/playlist/4667351',
        'only_matching': True,
    }, {
        'url': 'https://de.pornhub.com/playlist/4667351',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific extractors; this one's _VALID_URL is
        # broad enough to match almost any PornHub URL.
        return (False
                if PornHubIE.suitable(url) or PornHubUserIE.suitable(url) or PornHubUserVideosUploadIE.suitable(url)
                else super(PornHubPagedVideoListIE, cls).suitable(url))
class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE):
    """Extractor for a profile's own uploads (/videos/upload pages).

    Pagination is inherited from PornHubPagedPlaylistBaseIE.
    """
    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos/upload)'
    _TESTS = [{
        'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload',
        'info_dict': {
            'id': 'jenny-blighe',
        },
        'playlist_mincount': 129,
    }, {
        'url': 'https://www.pornhub.com/model/zoe_ph/videos/upload',
        'only_matching': True,
    }]
|
"""
Creation and extension of validators, with implementations for existing drafts.
"""
from collections.abc import Sequence
from functools import lru_cache
from urllib.parse import unquote, urldefrag, urljoin, urlsplit
from urllib.request import urlopen
from warnings import warn
import contextlib
import json
import warnings
from jsonschema import (
_legacy_validators,
_types,
_utils,
_validators,
exceptions,
)
# Registry of validator classes keyed by version name; filled in by the
# `validates` decorator below.
validators = {}
# Meta-schema URI -> validator class mapping, used to resolve "$schema" URIs.
meta_schemas = _utils.URIDict()
# Vocabulary-schema URI -> schema document mapping (draft 2019-09 and later).
_VOCABULARIES = _utils.URIDict()
def __getattr__(name):
    """Module-level attribute fallback (PEP 562).

    Serves ``ErrorTree`` with a deprecation warning, since it now lives in
    ``jsonschema.exceptions``; anything else is a genuine AttributeError.
    """
    if name != "ErrorTree":
        raise AttributeError(f"module {__name__} has no attribute {name}")
    warnings.warn(
        "Importing ErrorTree from jsonschema.validators is deprecated. "
        "Instead import it from jsonschema.exceptions.",
        DeprecationWarning,
    )
    from jsonschema.exceptions import ErrorTree
    return ErrorTree
def validates(version):
    """
    Register the decorated validator for a ``version`` of the specification.

    Registered validators and their meta schemas will be considered when
    parsing ``$schema`` properties' URIs.

    Arguments:

        version (str):

            An identifier to use as the version's name

    Returns:

        collections.abc.Callable:

            a class decorator to decorate the validator with the version
    """
    def _validates(cls):
        # Record the class under its version name, its meta-schema URI, and
        # each of its vocabulary-schema URIs.
        validators[version] = cls
        meta_schemas[cls.ID_OF(cls.META_SCHEMA)] = cls
        for vocabulary in cls.VOCABULARY_SCHEMAS:
            _VOCABULARIES[cls.ID_OF(vocabulary)] = vocabulary
        return cls
    return _validates
def _id_of(schema):
if schema is True or schema is False:
return ""
return schema.get("$id", "")
def _store_schema_list():
    """Return (URI, schema) pairs for every known meta and vocabulary schema."""
    pairs = [
        (id, validator.META_SCHEMA)
        for id, validator in meta_schemas.items()
    ]
    pairs.extend(_VOCABULARIES.items())
    return pairs
def create(
    meta_schema,
    vocabulary_schemas=(),
    validators=(),
    version=None,
    type_checker=_types.draft7_type_checker,
    id_of=_id_of,
    applicable_validators=lambda schema: schema.items(),
):
    """
    Create a new validator class.

    Arguments:

        meta_schema (collections.abc.Mapping):

            the meta schema for the new validator class

        vocabulary_schemas (collections.abc.Iterable):

            the vocabulary schemas known to the new validator class;
            registered alongside the meta schema when ``version`` is given

        validators (collections.abc.Mapping):

            a mapping from names to callables, where each callable will
            validate the schema property with the given name.

            Each callable should take 4 arguments:

                1. a validator instance,
                2. the value of the property being validated within the
                   instance
                3. the instance
                4. the schema

        version (str):

            an identifier for the version that this validator class will
            validate. If provided, the returned validator class will
            have its ``__name__`` set to include the version, and also
            will have `jsonschema.validators.validates` automatically
            called for the given version.

        type_checker (jsonschema.TypeChecker):

            a type checker, used when applying the :validator:`type` validator.

            If unprovided, a `jsonschema.TypeChecker` will be created
            with a set of default types typical of JSON Schema drafts.

        id_of (collections.abc.Callable):

            A function that given a schema, returns its ID.

        applicable_validators (collections.abc.Callable):

            A function that given a schema, returns the list of applicable
            validators (names and callables) which will be called on to
            validate the instance.

    Returns:

        a new `jsonschema.IValidator` class
    """

    class Validator:
        # Factory arguments captured as class-level configuration.
        VALIDATORS = dict(validators)
        META_SCHEMA = dict(meta_schema)
        VOCABULARY_SCHEMAS = list(vocabulary_schemas)
        TYPE_CHECKER = type_checker
        ID_OF = staticmethod(id_of)

        def __init__(self, schema, resolver=None, format_checker=None):
            if resolver is None:
                # Default resolver is anchored at the schema's own ID.
                resolver = RefResolver.from_schema(schema, id_of=id_of)

            self.resolver = resolver
            self.format_checker = format_checker
            self.schema = schema

        @classmethod
        def check_schema(cls, schema):
            """Validate ``schema`` against this class's meta schema."""
            for error in cls(cls.META_SCHEMA).iter_errors(schema):
                raise exceptions.SchemaError.create_from(error)

        def iter_errors(self, instance, _schema=None):
            """Lazily yield each ValidationError for ``instance``."""
            if _schema is None:
                _schema = self.schema

            # Boolean schemas: True accepts anything, False rejects anything.
            if _schema is True:
                return
            elif _schema is False:
                yield exceptions.ValidationError(
                    f"False schema does not allow {instance!r}",
                    validator=None,
                    validator_value=None,
                    instance=instance,
                    schema=_schema,
                )
                return

            # Enter the subschema's resolution scope (if it declares an ID)
            # for the duration of validation.
            scope = id_of(_schema)
            if scope:
                self.resolver.push_scope(scope)
            try:
                validators = applicable_validators(_schema)
                for k, v in validators:
                    validator = self.VALIDATORS.get(k)
                    if validator is None:
                        continue

                    errors = validator(self, v, instance, _schema) or ()
                    for error in errors:
                        # set details if not already set by the called fn
                        error._set(
                            validator=k,
                            validator_value=v,
                            instance=instance,
                            schema=_schema,
                        )
                        if k not in {"if", "$ref"}:
                            error.schema_path.appendleft(k)
                        yield error
            finally:
                if scope:
                    self.resolver.pop_scope()

        def descend(self, instance, schema, path=None, schema_path=None):
            # Validate a subinstance against a subschema, prefixing the given
            # segments onto each resulting error's paths.
            for error in self.iter_errors(instance, schema):
                if path is not None:
                    error.path.appendleft(path)
                if schema_path is not None:
                    error.schema_path.appendleft(schema_path)
                yield error

        def validate(self, *args, **kwargs):
            """Raise the first ValidationError found, if any."""
            for error in self.iter_errors(*args, **kwargs):
                raise error

        def is_type(self, instance, type):
            try:
                return self.TYPE_CHECKER.is_type(instance, type)
            except exceptions.UndefinedTypeCheck:
                raise exceptions.UnknownType(type, instance, self.schema)

        def is_valid(self, instance, _schema=None):
            # Valid iff iter_errors yields nothing.
            error = next(self.iter_errors(instance, _schema), None)
            return error is None

    if version is not None:
        # Register for $schema lookup and derive a name like "Draft7Validator".
        Validator = validates(version)(Validator)
        Validator.__name__ = (
            version.title().replace(" ", "").replace("-", "") + "Validator"
        )

    return Validator
def extend(validator, validators=(), version=None, type_checker=None):
    """
    Create a new validator class by extending an existing one.

    Arguments:

        validator (jsonschema.IValidator):

            an existing validator class to extend

        validators (collections.abc.Mapping):

            a mapping of new validator callables, structured as in `create`.

            .. note::

                A callable sharing a name with an existing one silently
                replaces it entirely, overriding any validation the parent
                performed for that keyword. To extend rather than replace,
                retrieve the parent callable via
                ``OldValidator.VALIDATORS["validator_name"]`` and delegate
                to it from the new callable.

    version (str):

            a version for the new validator class

        type_checker (jsonschema.TypeChecker):

            a type checker used when applying the :validator:`type`
            validator; defaults to the extended validator's own checker.

    Returns:

        a new `jsonschema.IValidator` class extending the one provided

    .. note:: Meta Schemas

        The new class reuses its parent's ``META_SCHEMA`` object without
        copying; copy it before modifying to avoid affecting the parent.
    """
    merged = dict(validator.VALIDATORS)
    merged.update(validators)
    return create(
        meta_schema=validator.META_SCHEMA,
        validators=merged,
        version=version,
        type_checker=validator.TYPE_CHECKER if type_checker is None else type_checker,
        id_of=validator.ID_OF,
    )
# Draft 3: legacy keywords ("disallow", "extends", "divisibleBy") and an
# unprefixed "id" as the schema identifier.
Draft3Validator = create(
    meta_schema=_utils.load_schema("draft3"),
    validators={
        "$ref": _validators.ref,
        "additionalItems": _validators.additionalItems,
        "additionalProperties": _validators.additionalProperties,
        "dependencies": _legacy_validators.dependencies_draft3,
        "disallow": _legacy_validators.disallow_draft3,
        "divisibleBy": _validators.multipleOf,
        "enum": _validators.enum,
        "extends": _legacy_validators.extends_draft3,
        "format": _validators.format,
        "items": _legacy_validators.items_draft3_draft4,
        "maxItems": _validators.maxItems,
        "maxLength": _validators.maxLength,
        "maximum": _legacy_validators.maximum_draft3_draft4,
        "minItems": _validators.minItems,
        "minLength": _validators.minLength,
        "minimum": _legacy_validators.minimum_draft3_draft4,
        "pattern": _validators.pattern,
        "patternProperties": _validators.patternProperties,
        "properties": _legacy_validators.properties_draft3,
        "type": _legacy_validators.type_draft3,
        "uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft3_type_checker,
    version="draft3",
    id_of=lambda schema: schema.get("id", ""),
    applicable_validators=_legacy_validators.ignore_ref_siblings,
)
# Draft 4: adds allOf/anyOf/oneOf/not, required as a keyword, and
# min/maxProperties; still uses unprefixed "id".
Draft4Validator = create(
    meta_schema=_utils.load_schema("draft4"),
    validators={
        "$ref": _validators.ref,
        "additionalItems": _validators.additionalItems,
        "additionalProperties": _validators.additionalProperties,
        "allOf": _validators.allOf,
        "anyOf": _validators.anyOf,
        "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7,
        "enum": _validators.enum,
        "format": _validators.format,
        "items": _legacy_validators.items_draft3_draft4,
        "maxItems": _validators.maxItems,
        "maxLength": _validators.maxLength,
        "maxProperties": _validators.maxProperties,
        "maximum": _legacy_validators.maximum_draft3_draft4,
        "minItems": _validators.minItems,
        "minLength": _validators.minLength,
        "minProperties": _validators.minProperties,
        "minimum": _legacy_validators.minimum_draft3_draft4,
        "multipleOf": _validators.multipleOf,
        "not": _validators.not_,
        "oneOf": _validators.oneOf,
        "pattern": _validators.pattern,
        "patternProperties": _validators.patternProperties,
        "properties": _validators.properties,
        "required": _validators.required,
        "type": _validators.type,
        "uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft4_type_checker,
    version="draft4",
    id_of=lambda schema: schema.get("id", ""),
    applicable_validators=_legacy_validators.ignore_ref_siblings,
)
# Draft 6: adds const/contains/propertyNames, numeric exclusive bounds, and
# switches to "$id" (the default id_of).
Draft6Validator = create(
    meta_schema=_utils.load_schema("draft6"),
    validators={
        "$ref": _validators.ref,
        "additionalItems": _validators.additionalItems,
        "additionalProperties": _validators.additionalProperties,
        "allOf": _validators.allOf,
        "anyOf": _validators.anyOf,
        "const": _validators.const,
        "contains": _legacy_validators.contains_draft6_draft7,
        "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7,
        "enum": _validators.enum,
        "exclusiveMaximum": _validators.exclusiveMaximum,
        "exclusiveMinimum": _validators.exclusiveMinimum,
        "format": _validators.format,
        "items": _legacy_validators.items_draft6_draft7_draft201909,
        "maxItems": _validators.maxItems,
        "maxLength": _validators.maxLength,
        "maxProperties": _validators.maxProperties,
        "maximum": _validators.maximum,
        "minItems": _validators.minItems,
        "minLength": _validators.minLength,
        "minProperties": _validators.minProperties,
        "minimum": _validators.minimum,
        "multipleOf": _validators.multipleOf,
        "not": _validators.not_,
        "oneOf": _validators.oneOf,
        "pattern": _validators.pattern,
        "patternProperties": _validators.patternProperties,
        "properties": _validators.properties,
        "propertyNames": _validators.propertyNames,
        "required": _validators.required,
        "type": _validators.type,
        "uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft6_type_checker,
    version="draft6",
    applicable_validators=_legacy_validators.ignore_ref_siblings,
)
# Draft 7: draft 6 plus if/then/else conditional application.
Draft7Validator = create(
    meta_schema=_utils.load_schema("draft7"),
    validators={
        "$ref": _validators.ref,
        "additionalItems": _validators.additionalItems,
        "additionalProperties": _validators.additionalProperties,
        "allOf": _validators.allOf,
        "anyOf": _validators.anyOf,
        "const": _validators.const,
        "contains": _legacy_validators.contains_draft6_draft7,
        "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7,
        "enum": _validators.enum,
        "exclusiveMaximum": _validators.exclusiveMaximum,
        "exclusiveMinimum": _validators.exclusiveMinimum,
        "format": _validators.format,
        "if": _validators.if_,
        "items": _legacy_validators.items_draft6_draft7_draft201909,
        "maxItems": _validators.maxItems,
        "maxLength": _validators.maxLength,
        "maxProperties": _validators.maxProperties,
        "maximum": _validators.maximum,
        "minItems": _validators.minItems,
        "minLength": _validators.minLength,
        "minProperties": _validators.minProperties,
        "minimum": _validators.minimum,
        "multipleOf": _validators.multipleOf,
        "not": _validators.not_,
        "oneOf": _validators.oneOf,
        "pattern": _validators.pattern,
        "patternProperties": _validators.patternProperties,
        "properties": _validators.properties,
        "propertyNames": _validators.propertyNames,
        "required": _validators.required,
        "type": _validators.type,
        "uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft7_type_checker,
    version="draft7",
    applicable_validators=_legacy_validators.ignore_ref_siblings,
)
# Draft 2019-09: introduces vocabularies, $recursiveRef,
# dependentRequired/dependentSchemas, and unevaluated items/properties.
# $ref siblings are now honored (no ignore_ref_siblings).
Draft201909Validator = create(
    meta_schema=_utils.load_schema("draft2019-09"),
    vocabulary_schemas=_utils.load_vocabulary("draft2019-09"),
    validators={
        "$recursiveRef": _legacy_validators.recursiveRef,
        "$ref": _validators.ref,
        "additionalItems": _validators.additionalItems,
        "additionalProperties": _validators.additionalProperties,
        "allOf": _validators.allOf,
        "anyOf": _validators.anyOf,
        "const": _validators.const,
        "contains": _validators.contains,
        "dependentRequired": _validators.dependentRequired,
        "dependentSchemas": _validators.dependentSchemas,
        "enum": _validators.enum,
        "exclusiveMaximum": _validators.exclusiveMaximum,
        "exclusiveMinimum": _validators.exclusiveMinimum,
        "format": _validators.format,
        "if": _validators.if_,
        "items": _legacy_validators.items_draft6_draft7_draft201909,
        "maxItems": _validators.maxItems,
        "maxLength": _validators.maxLength,
        "maxProperties": _validators.maxProperties,
        "maximum": _validators.maximum,
        "minItems": _validators.minItems,
        "minLength": _validators.minLength,
        "minProperties": _validators.minProperties,
        "minimum": _validators.minimum,
        "multipleOf": _validators.multipleOf,
        "not": _validators.not_,
        "oneOf": _validators.oneOf,
        "pattern": _validators.pattern,
        "patternProperties": _validators.patternProperties,
        "properties": _validators.properties,
        "propertyNames": _validators.propertyNames,
        "required": _validators.required,
        "type": _validators.type,
        "unevaluatedItems": _validators.unevaluatedItems,
        "unevaluatedProperties": _validators.unevaluatedProperties,
        "uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft201909_type_checker,
    version="draft2019-09",
)
# Draft 2020-12: $dynamicRef replaces $recursiveRef, prefixItems replaces
# array-form items.
Draft202012Validator = create(
    meta_schema=_utils.load_schema("draft2020-12"),
    vocabulary_schemas=_utils.load_vocabulary("draft2020-12"),
    validators={
        "$dynamicRef": _validators.dynamicRef,
        "$ref": _validators.ref,
        "additionalItems": _validators.additionalItems,
        "additionalProperties": _validators.additionalProperties,
        "allOf": _validators.allOf,
        "anyOf": _validators.anyOf,
        "const": _validators.const,
        "contains": _validators.contains,
        "dependentRequired": _validators.dependentRequired,
        "dependentSchemas": _validators.dependentSchemas,
        "enum": _validators.enum,
        "exclusiveMaximum": _validators.exclusiveMaximum,
        "exclusiveMinimum": _validators.exclusiveMinimum,
        "format": _validators.format,
        "if": _validators.if_,
        "items": _validators.items,
        "maxItems": _validators.maxItems,
        "maxLength": _validators.maxLength,
        "maxProperties": _validators.maxProperties,
        "maximum": _validators.maximum,
        "minItems": _validators.minItems,
        "minLength": _validators.minLength,
        "minProperties": _validators.minProperties,
        "minimum": _validators.minimum,
        "multipleOf": _validators.multipleOf,
        "not": _validators.not_,
        "oneOf": _validators.oneOf,
        "pattern": _validators.pattern,
        "patternProperties": _validators.patternProperties,
        "prefixItems": _validators.prefixItems,
        "properties": _validators.properties,
        "propertyNames": _validators.propertyNames,
        "required": _validators.required,
        "type": _validators.type,
        "unevaluatedItems": _validators.unevaluatedItems,
        "unevaluatedProperties": _validators.unevaluatedProperties,
        "uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft202012_type_checker,
    version="draft2020-12",
)

# The most recent draft this module implements.
_LATEST_VERSION = Draft202012Validator
class RefResolver(object):
"""
Resolve JSON References.
Arguments:
base_uri (str):
The URI of the referring document
referrer:
The actual referring document
store (dict):
A mapping from URIs to documents to cache
cache_remote (bool):
Whether remote refs should be cached after first resolution
handlers (dict):
A mapping from URI schemes to functions that should be used
to retrieve them
urljoin_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of joining
the resolution scope to subscopes.
remote_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of
resolved remote URLs.
Attributes:
cache_remote (bool):
Whether remote refs should be cached after first resolution
"""
def __init__(
self,
base_uri,
referrer,
store=(),
cache_remote=True,
handlers=(),
urljoin_cache=None,
remote_cache=None,
):
if urljoin_cache is None:
urljoin_cache = lru_cache(1024)(urljoin)
if remote_cache is None:
remote_cache = lru_cache(1024)(self.resolve_from_url)
self.referrer = referrer
self.cache_remote = cache_remote
self.handlers = dict(handlers)
self._scopes_stack = [base_uri]
self.store = _utils.URIDict(_store_schema_list())
self.store.update(store)
self.store[base_uri] = referrer
self._urljoin_cache = urljoin_cache
self._remote_cache = remote_cache
@classmethod
def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
"""
Construct a resolver from a JSON schema object.
Arguments:
schema:
the referring schema
Returns:
`RefResolver`
"""
return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)
def push_scope(self, scope):
"""
Enter a given sub-scope.
Treats further dereferences as being performed underneath the
given scope.
"""
self._scopes_stack.append(
self._urljoin_cache(self.resolution_scope, scope),
)
def pop_scope(self):
"""
Exit the most recent entered scope.
Treats further dereferences as being performed underneath the
original scope.
Don't call this method more times than `push_scope` has been
called.
"""
try:
self._scopes_stack.pop()
except IndexError:
raise exceptions.RefResolutionError(
"Failed to pop the scope from an empty stack. "
"`pop_scope()` should only be called once for every "
"`push_scope()`",
)
@property
def resolution_scope(self):
"""
Retrieve the current resolution scope.
"""
return self._scopes_stack[-1]
@property
def scopes_stack_copy(self):
"""
Retrieve a copy of the stack of resolution scopes.
"""
return self._scopes_stack.copy()
@property
def base_uri(self):
"""
Retrieve the current base URI, not including any fragment.
"""
uri, _ = urldefrag(self.resolution_scope)
return uri
@contextlib.contextmanager
def in_scope(self, scope):
"""
Temporarily enter the given scope for the duration of the context.
"""
self.push_scope(scope)
try:
yield
finally:
self.pop_scope()
@contextlib.contextmanager
def resolving(self, ref):
"""
Resolve the given ``ref`` and enter its resolution scope.
Exits the scope on exit of this context manager.
Arguments:
ref (str):
The reference to resolve
"""
url, resolved = self.resolve(ref)
self.push_scope(url)
try:
yield resolved
finally:
self.pop_scope()
def _finditem(self, schema, key):
results = []
if isinstance(schema, dict):
if key in schema:
results.append(schema)
for v in schema.values():
if isinstance(v, dict):
results += self._finditem(v, key)
return results
def resolve_local(self, url, schema):
"""
Resolve the given reference within the schema
"""
uri, fragment = urldefrag(url)
for subschema in self._finditem(schema, "$id"):
target_uri = self._urljoin_cache(
self.resolution_scope, subschema["$id"],
)
if target_uri.rstrip("/") == uri.rstrip("/"):
if fragment:
subschema = self.resolve_fragment(subschema, fragment)
return subschema
def resolve(self, ref):
"""
Resolve the given reference.
"""
url = self._urljoin_cache(self.resolution_scope, ref).rstrip("/")
local_resolve = self.resolve_local(url, self.referrer)
if local_resolve:
return url, local_resolve
return url, self._remote_cache(url)
def resolve_from_url(self, url):
"""
Resolve the given remote URL.
"""
url, fragment = urldefrag(url)
try:
document = self.store[url]
except KeyError:
try:
document = self.resolve_remote(url)
except Exception as exc:
raise exceptions.RefResolutionError(exc)
return self.resolve_fragment(document, fragment)
    def resolve_fragment(self, document, fragment):
        """
        Resolve a ``fragment`` within the referenced ``document``.

        Arguments:

            document:

                The referent document

            fragment (str):

                a URI fragment to resolve within it

        Plain-name fragments are first matched against ``$anchor`` /
        ``$dynamicAnchor`` keywords anywhere in the document; otherwise
        the fragment is treated as a JSON pointer (RFC 6901) and walked
        segment by segment.

        Raises:

            ``exceptions.RefResolutionError`` if a pointer segment
            cannot be followed.
        """
        fragment = fragment.lstrip("/")
        if fragment:
            for keyword in ["$anchor", "$dynamicAnchor"]:
                for subschema in self._finditem(document, keyword):
                    if fragment == subschema[keyword]:
                        return subschema
        # Resolve via path
        parts = unquote(fragment).split("/") if fragment else []
        for part in parts:
            # Undo JSON-pointer escaping (~1 -> '/', ~0 -> '~'), in this order.
            part = part.replace("~1", "/").replace("~0", "~")
            if isinstance(document, Sequence):
                # Array indexes should be turned into integers
                try:
                    part = int(part)
                except ValueError:
                    pass
            try:
                document = document[part]
            except (TypeError, LookupError):
                raise exceptions.RefResolutionError(
                    f"Unresolvable JSON pointer: {fragment!r}",
                )
        return document
    def resolve_remote(self, uri):
        """
        Resolve a remote ``uri``.

        If called directly, does not check the store first, but after
        retrieving the document at the specified URI it will be saved in
        the store if :attr:`cache_remote` is True.

        .. note::

            If the requests_ library is present, ``jsonschema`` will use it to
            request the remote ``uri``, so that the correct encoding is
            detected and used.

            If it isn't, or if the scheme of the ``uri`` is not ``http`` or
            ``https``, UTF-8 is assumed.

        Arguments:

            uri (str):

                The URI to resolve

        Returns:

            The retrieved document

        .. _requests: https://pypi.org/project/requests/
        """
        # Soft dependency: fall back to urllib if requests is absent.
        try:
            import requests
        except ImportError:
            requests = None
        scheme = urlsplit(uri).scheme
        # A user-registered handler for the scheme always takes precedence.
        if scheme in self.handlers:
            result = self.handlers[scheme](uri)
        elif scheme in ["http", "https"] and requests:
            # Requests has support for detecting the correct encoding of
            # json over http
            result = requests.get(uri).json()
        else:
            # Otherwise, pass off to urllib and assume utf-8
            with urlopen(uri) as url:
                result = json.loads(url.read().decode("utf-8"))
        if self.cache_remote:
            self.store[uri] = result
        return result
def validate(instance, schema, cls=None, *args, **kwargs):
    """
    Validate an instance under the given schema.

        >>> validate([2, 3, 4], {"maxItems": 2})
        Traceback (most recent call last):
            ...
        ValidationError: [2, 3, 4] is too long

    :func:`validate` will first verify that the provided schema is
    itself valid, since not doing so can lead to less obvious error
    messages and fail in less obvious or consistent ways.

    If you know you have a valid schema already, especially if you
    intend to validate multiple instances with the same schema, you
    likely would prefer using the `IValidator.validate` method directly
    on a specific validator (e.g. ``Draft7Validator.validate``).

    Arguments:

        instance:

            The instance to validate

        schema:

            The schema to validate with

        cls (IValidator):

            The class that will be used to validate the instance.

    If the ``cls`` argument is not provided, two things will happen
    in accordance with the specification. First, if the schema has a
    :validator:`$schema` property containing a known meta-schema [#]_
    then the proper validator will be used. The specification recommends
    that all schemas contain :validator:`$schema` properties for this
    reason. If no :validator:`$schema` property is found, the default
    validator class is the latest released draft.

    Any other provided positional and keyword arguments will be passed
    on when instantiating the ``cls``.

    Raises:

        `jsonschema.exceptions.ValidationError` if the instance
            is invalid

        `jsonschema.exceptions.SchemaError` if the schema itself
            is invalid

    .. rubric:: Footnotes
    .. [#] known by a validator registered with
        `jsonschema.validators.validates`
    """
    if cls is None:
        # Pick a validator class from the schema's own $schema declaration.
        cls = validator_for(schema)
    # Validate the schema itself before validating the instance with it.
    cls.check_schema(schema)
    validator = cls(schema, *args, **kwargs)
    # best_match returns the most relevant error, or None when valid.
    error = exceptions.best_match(validator.iter_errors(instance))
    if error is not None:
        raise error
def validator_for(schema, default=_LATEST_VERSION):
    """
    Retrieve the validator class appropriate for validating the given schema.

    Looks up the schema's :validator:`$schema` declaration in the
    registry of known meta-schemas.

    Arguments:

        schema (collections.abc.Mapping or bool):

            the schema to look at

        default:

            the default to return if the appropriate validator class
            cannot be determined.

            If unprovided, the default is to return the latest supported
            draft.
    """
    # Boolean schemas and schemas without a $schema declaration cannot
    # name a dialect, so hand back the caller-supplied default.
    if isinstance(schema, bool) or "$schema" not in schema:
        return default
    dialect = schema["$schema"]
    if dialect not in meta_schemas:
        warn(
            "The metaschema specified by $schema was not found. "
            "Using the latest draft to validate, but this will raise "
            "an error in the future.",
            DeprecationWarning,
            stacklevel=2,
        )
    return meta_schemas.get(dialect, _LATEST_VERSION)
|
"""Flit's core machinery for building packages.
This package provides a standard PEP 517 API to build packages using Flit.
All the convenient development features live in the main 'flit' package.
"""
__version__ = '3.6.0'
|
import sys
import numpy as np
# Grid dimensions: h rows and w columns, read from stdin.
h, w = map(int, sys.stdin.readline().split())
# The grid as an array of single characters; '#' marks a blocked cell.
s = np.array([list(sys.stdin.readline().rstrip()) for _ in range(h)], dtype='U')
# Pad one sentinel cell on every side.  np.pad's default constant fill
# for a unicode dtype is the empty string (presumably — TODO confirm),
# which never equals '#', so the border behaves as non-blocking padding
# that the run counters below never seed from (column/row 0 stays 0).
s = np.pad(s, 1)
def main():
    """Return the maximum number of cells a 'lamp' placed on an open cell
    can light: the full horizontal and vertical runs of open cells
    through that cell."""
    # l/r/u/d[i, j] = length of the consecutive run of open cells ending
    # at (i, j) when sweeping from the left/right/top/bottom; each count
    # includes the cell itself, and blocked '#' cells stay 0.
    l = np.zeros((h+2, w+2), dtype=np.int64)
    r = np.zeros((h+2, w+2), dtype=np.int64)
    u = np.zeros((h+2, w+2), dtype=np.int64)
    d = np.zeros((h+2, w+2), dtype=np.int64)
    for i in range(1, w+1):
        bl = s[:, i] == '#'
        l[~bl, i] = l[~bl, i-1] + 1
        # Reuse the same loop to sweep the mirrored column for the right runs.
        i = w+1-i
        bl = s[:, i] == '#'
        r[~bl, i] = r[~bl, i+1] + 1
    for i in range(1, h+1):
        bl = s[i, :] == '#'
        u[i, ~bl] = u[i-1, ~bl] + 1
        # Mirrored row index for the bottom-up sweep.
        i = h+1-i
        bl = s[i, :] == '#'
        d[i, ~bl] = d[i+1, ~bl] + 1
    # An open cell is counted once in each of the four runs, so subtract
    # the 3 duplicates.  Blocked cells end up at -3 and never win the max.
    res = l + r + u + d - 3
    # Only the interior (unpadded) region is a legal lamp position.
    return np.amax(res[1:h+1, 1:w+1])
# Script entry point: compute and print the answer.
if __name__ == '__main__':
    ans = main()
    print(ans)
|
"""stub action for replay
"""
from . import _actions as actions
from .run import Action as BaseAction
@actions.register
class Action(BaseAction):
    # pylint: disable=too-many-instance-attributes
    """:replay"""

    # Verbose-mode (?x) regex matched against user input: accepts
    # ':replay' or the ':rep' abbreviation, optionally followed by a
    # single whitespace-separated parameter captured as 'params_replay'.
    KEGEX = r"""(?x)
    ^
    (?P<replay>rep(?:lay)?
    (\s(?P<params_replay>\S+))?)
    $"""
|
##Ratings for SRNDNA
from psychopy import visual, core, event, gui, data, logging
import csv
import datetime
import random
import numpy
import os
import sys
# Experiment parameters (NOTE: this file is Python 2 — it uses print
# statements further down).
useFullScreen = True
useDualScreen=1
DEBUG = False
frame_rate=1
# Keys accepted during the rating trials: 1/3 move the slider, 2 accepts.
responseKeys=('1','2','3','z','enter','escape')
# Subject-information dialog; the trailing #N comments give each
# field's index in subjDlg.data.
subjDlg=gui.Dlg(title="Investment Task Ratings")
subjDlg.addField('Enter Subject ID: ') #0
subjDlg.addField('Enter Gender (0 for male, 1 for female): ') #1
subjDlg.addField('Enter Ethnicity (0 for Caucasian, 1 for Other): ') #2
subjDlg.addField('Enter Age: ') #3
subjDlg.addField('Full Screen? (Enter lowercase: y or n):') #4
subjDlg.addField('Session (1/2): ') #5
subjDlg.show()
# BUGFIX: the OK/Cancel state lives on the Dlg *instance* (subjDlg.OK),
# not on the psychopy `gui` module.  Checking `gui.OK` read a module
# attribute and could never reflect the user cancelling the dialog.
if subjDlg.OK:
    subj_id=subjDlg.data[0]
    subj_gen=subjDlg.data[1]
    subj_eth=subjDlg.data[2]
    subj_age=subjDlg.data[3]
    subj_session=subjDlg.data[5]  # field 4 (fullscreen) is intentionally unused here
else:
    sys.exit()
# Session metadata recorded alongside the behavioral data.
run_data = {
    'Participant ID': subj_id,
    'Date': str(datetime.datetime.now()),
    'Description': 'Rej_Discount',
    'Participant Gender': subj_gen,
    'Participant Age': subj_age,
    'Session (Pre/Post)': subj_session
}
#window setup
win = visual.Window([800,600], monitor="testMonitor", units="deg", fullscr=useFullScreen, allowGUI=False, screen=useDualScreen)
#checkpoint
print "got to check 1"
#first screen
instruct_screen = visual.TextStim(win, text='Please use the left and right arrow keys to move the slider along the scale. Press enter to record your answer to each of the following questions', pos = (0,0), units='norm', height = 0.1)
#image
#pictureStim = visual.ImageStim(win, pos=(0,5.5),size=(6.65,6.65))
#text
questionStim = visual.TextStim(win, font='Arial', pos = (0,-0.1), units='norm', height = 0.1)
#last screen
exit_screen = visual.TextStim(win, text='Thank you for participating!', pos = (0,0), units='norm', height = 0.1)
# Per-subject log directory, created on first run.
expdir = os.getcwd()
subjdir = '%s/logs/%s' % (expdir, subj_id)
if not os.path.exists(subjdir):
    os.makedirs(subjdir)
# Filled in later with .format(subj_id, subj_session).
log_file = os.path.join(subjdir,'sub{}_Rej_Discount-Ratings-{}.csv')
# Trial definitions come from a CSV with (at least) a 'Cue' column.
trial_data = [r for r in csv.DictReader(open('Rej_DiscountRatings.csv','rU'))]
trials = data.TrialHandler(trial_data[:], 1, method="sequential")
# Cue code -> question wording; session 1 asks anticipated feelings,
# session 2 asks about actual reactions.
if subj_session == '1':
    question_map = {
        '3': 'your current mood',
        '2': 'how you think you would feel being liked by someone',
        '1': 'how you think you would feel not being liked by someone'
    }
else:
    question_map = {
        '3': 'your current mood',
        '2': 'your reaction to being liked',
        '1': 'your reaction to not being liked'
    }
#clock
globalClock = core.Clock()
logging.setDefaultClock(globalClock)
timer = core.Clock()
#main task
# Show the instructions until the spacebar is pressed.
instruct_screen.draw()
win.flip()
event.waitKeys(keyList=('space'))
for trial in trials:
    #condition_label = stim_map[trial['Partner']]
    #imagepath = os.path.join(expdir,'Images')
    #image = os.path.join(imagepath,"%s.png") %condition_label
    #pictureStim.setImage(image)
    question_label = question_map[trial['Cue']]
    #position = trial['position']
    fileName=log_file.format(subj_id,subj_session)
    # rating scale: -5..5, keys 1/3 move the marker, 2 accepts.
    scale = visual.RatingScale(win, low='-5', high='5', size=2,
        markerStart='-5',marker='triangle', textSize=.5, showValue=True, scale = '-5 = negative, 0 = neutral, 5 = positive',
        showAccept=True, noMouse=True, maxTime = 0.0, leftKeys = '1', rightKeys = '3', acceptKeys = '2', pos = (0, -0.55))
    resp=[]
    resp_val=None
    resp_onset=None
    trial_onset = globalClock.getTime()
    # Redraw until the participant accepts a rating.
    while scale.noResponse:
        scale.draw()
        #pictureStim.draw()
        questionStim.draw()
        # NOTE(review): the text is set *after* the first draw, so the
        # very first frame shows the previous trial's question.
        ratingQ = 'Please rate %s.' % question_label
        questionStim.setText(ratingQ)
        win.flip()
        #resp = event.getKeys(keyList = responseKeys)
        # NOTE(review): with getKeys commented out, resp is never
        # populated, so this escape/abort branch is dead code.
        if len(resp)>0:
            if resp[0] == 'escape':
                trials.addData('Rating', scale.getRating())
                os.chdir(subjdir)
                trials.saveAsWideText(fileName)
                os.chdir(expdir)
                win.close()
                core.quit()
    # Record the accepted rating and re-save the data after every trial.
    trials.addData('Rating', scale.getRating())
    os.chdir(subjdir)
    trials.saveAsWideText(fileName)
    os.chdir(expdir)
#last screen
exit_screen.draw()
win.flip()
event.waitKeys()
|
"""Manages state database used for checksum caching."""
import logging
import os
from abc import ABC, abstractmethod
from dvc.fs.local import LocalFileSystem
from dvc.hash_info import HashInfo
from dvc.utils import relpath
from dvc.utils.fs import get_inode, get_mtime_and_size, remove
logger = logging.getLogger(__name__)
class StateBase(ABC):
    """Abstract interface for checksum-cache state backends."""

    @abstractmethod
    def close(self):
        """Release any resources held by the backend."""
        pass

    @abstractmethod
    def save(self, path, fs, hash_info):
        """Record *hash_info* for *path* on filesystem *fs*."""
        pass

    @abstractmethod
    def get(self, path, fs):
        """Return the cached (meta, hash) pair for *path*, if any."""
        pass

    @abstractmethod
    def save_link(self, path, fs):
        """Record *path* as a link created by dvc."""
        pass
class StateNoop(StateBase):
    """No-op backend: caches nothing and always reports a miss."""

    def close(self):
        pass

    def save(self, path, fs, hash_info):
        pass

    def get(self, path, fs):  # pylint: disable=unused-argument
        # Always a cache miss.
        return None, None

    def save_link(self, path, fs):
        pass
class State(StateBase):  # pylint: disable=too-many-instance-attributes
    """Disk-backed state: caches md5 checksums and created links in two
    on-disk LRU caches under *tmp_dir*."""

    def __init__(self, root_dir=None, tmp_dir=None, dvcignore=None):
        from diskcache import Cache

        super().__init__()

        self.tmp_dir = tmp_dir
        self.root_dir = root_dir
        self.dvcignore = dvcignore

        # Without a tmp dir there is nowhere to persist the caches.
        # NOTE(review): in that case self.links/self.md5s are never
        # created, so close() would raise AttributeError — confirm
        # callers never close a State built without tmp_dir.
        if not tmp_dir:
            return

        config = {
            "eviction_policy": "least-recently-used",
            # Protocol 4 keeps the cache readable across the supported
            # Python versions.
            "disk_pickle_protocol": 4,
        }
        self.links = Cache(directory=os.path.join(tmp_dir, "links"), **config)
        self.md5s = Cache(directory=os.path.join(tmp_dir, "md5s"), **config)

    def close(self):
        """Flush and close both on-disk caches."""
        self.md5s.close()
        self.links.close()

    def save(self, path, fs, hash_info):
        """Save hash for the specified path info.

        Only local filesystem paths are cached; the cache key is the
        file's inode, validated later against (mtime, size).

        Args:
            path (str): path to save hash for.
            hash_info (HashInfo): hash to save.
        """
        if not isinstance(fs, LocalFileSystem):
            return

        mtime, size = get_mtime_and_size(path, fs, self.dvcignore)
        inode = get_inode(path)

        logger.debug(
            "state save (%s, %s, %s) %s",
            inode,
            mtime,
            str(size),
            hash_info.value,
        )

        self.md5s[inode] = (mtime, str(size), hash_info.value)

    def get(self, path, fs):
        """Gets the hash for the specified path info. Hash will be
        retrieved from the state database if available.

        Args:
            path (str): path info to get the hash for.

        Returns:
            HashInfo or None: hash for the specified path info or None if it
            doesn't exist in the state database.
        """
        from .objects.meta import Meta

        if not isinstance(fs, LocalFileSystem):
            return None, None

        try:
            mtime, size = get_mtime_and_size(path, fs, self.dvcignore)
        except FileNotFoundError:
            return None, None

        inode = get_inode(path)

        value = self.md5s.get(inode)
        # Invalidate the cached hash if the file changed since it was saved.
        if not value or value[0] != mtime or value[1] != str(size):
            return None, None

        return Meta(size=size), HashInfo("md5", value[2])

    def save_link(self, path, fs):
        """Adds the specified path to the list of links created by dvc. This
        list is later used on `dvc checkout` to cleanup old links.

        Args:
            path (str): path info to add to the list of links.
        """
        if not isinstance(fs, LocalFileSystem):
            return

        try:
            mtime, _ = get_mtime_and_size(path, fs, self.dvcignore)
        except FileNotFoundError:
            # The link vanished before we could record it; nothing to save.
            return

        inode = get_inode(path)
        relative_path = relpath(path, self.root_dir)

        with self.links as ref:
            ref[relative_path] = (inode, mtime)

    def get_unused_links(self, used, fs):
        """Return the saved links that are no longer in *used*.

        Note: despite collecting candidates for cleanup, this method
        does not remove anything itself — see ``remove_links``.

        Args:
            used (list): list of used links that should not be returned.
        """
        if not isinstance(fs, LocalFileSystem):
            return

        unused = []

        with self.links as ref:
            for relative_path in ref:
                path = os.path.join(self.root_dir, relative_path)

                if path in used or not fs.exists(path):
                    continue

                inode = get_inode(path)
                mtime, _ = get_mtime_and_size(path, fs, self.dvcignore)

                # Only report links that are unchanged since dvc made them.
                if ref[relative_path] == (inode, mtime):
                    logger.debug("Removing '%s' as unused link.", path)
                    unused.append(relative_path)

        return unused

    def remove_links(self, unused, fs):
        """Delete the given links from disk and forget them in the cache."""
        if not isinstance(fs, LocalFileSystem):
            return

        for path in unused:
            remove(os.path.join(self.root_dir, path))

        with self.links as ref:
            for path in unused:
                del ref[path]
|
from django.db import models
# Create your models here.
class Contact(models.Model):
    """A message submitted through the site's contact form."""

    name = models.CharField(max_length=50)
    company = models.CharField(max_length=100, blank=True)
    phone = models.CharField(max_length=11)
    email = models.EmailField()
    message = models.TextField()

    def __str__(self):
        # __str__ must always return a str; the previous implementation
        # fell through and returned None (a TypeError when Django
        # stringifies the object) whenever name was unset.
        return self.name or ""
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def nms_calculate(boxes, scores, iou_threshold, max_output_size, name='non_maximal_suppression'):
    """Return the indices of *boxes* kept by non-max suppression.

    Thin wrapper around ``tf.image.non_max_suppression`` (TF1-style:
    uses ``tf.variable_scope``).

    Args:
        boxes: box coordinates tensor (assumes [num_boxes, 4] — TODO confirm).
        scores: per-box scores used for ranking.
        iou_threshold: IoU above which a lower-scored box is suppressed.
        max_output_size: maximum number of indices to return.
        name: name used for both the variable scope and the op.
    """
    with tf.variable_scope(name):
        nms_index = tf.image.non_max_suppression(
            boxes=boxes,
            scores=scores,
            max_output_size=max_output_size,
            iou_threshold=iou_threshold,
            name=name
        )
    return nms_index
|
"""Package configuration"""
import os
from setuptools import setup, find_packages
VERSION = "2.4.1"
README = """
pip-compile-multi
=================
Compile multiple requirements files to lock dependency versions.
Install
-------
.. code-block:: shell
pip install pip-compile-multi
Run
----
.. code-block:: shell
pip-compile-multi
Links
-----
* Documentation: https://pip-compile-multi.readthedocs.io/en/latest/
* Releases: https://pypi.python.org/pypi/pip-compile-multi
* Code: https://github.com/peterdemin/pip-compile-multi
* Issue tracker: https://github.com/peterdemin/pip-compile-multi/issues
"""
with open('HISTORY.rst') as fp:
HISTORY = fp.read().replace('.. :changelog:', '')
with open(os.path.join('requirements', 'base.in')) as fp:
REQUIREMENTS = list(fp)
CONSOLE_SCRIPTS = [
'pip-compile-multi = pipcompilemulti.cli_v1:cli',
]
if os.environ.get('PCM_ALPHA') == 'ON':
CONSOLE_SCRIPTS.append(
'requirements = pipcompilemulti.cli_v2:cli'
)
# Package metadata.  FIX: the license classifier previously said
# "BSD License" while the license field says MIT — the classifier is
# aligned with the declared license.
setup(
    name='pip-compile-multi',
    version=VERSION,
    description="Compile multiple requirements files "
                "to lock dependency versions",
    long_description=README + '\n\n' + HISTORY,
    author='Peter Demin',
    author_email='peterdemin@gmail.com',
    url='https://github.com/peterdemin/pip-compile-multi',
    include_package_data=True,
    packages=find_packages(exclude=['tests']),
    install_requires=REQUIREMENTS,
    python_requires='~=3.6',
    license="MIT",
    zip_safe=False,
    keywords='pip-compile-multi',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Environment :: Console',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Utilities',
    ],
    entry_points={
        'console_scripts': CONSOLE_SCRIPTS,
    },
    setup_requires=['setuptools', 'wheel'],
)
|
import sys
import pytest
import subprocess
from caproto.sync.client import read, write, subscribe, block
from .conftest import dump_process_output
def escape(pv_name, response):
    """Subscription callback that aborts monitoring by raising
    KeyboardInterrupt (both arguments are ignored)."""
    raise KeyboardInterrupt()
def fix_arg_prefixes(ioc, args):
    """Add prefix to CLI argument pvnames where necessary.

    Each arg present in ``ioc.pvs`` is replaced by its prefixed form;
    unknown args pass through unchanged.
    """
    resolved = []
    for arg in args:
        resolved.append(ioc.pvs.get(arg, arg))
    return resolved
# Reading/writing a nonexistent PV should time out rather than hang.
@pytest.mark.parametrize('func,args,kwargs',
                         [(read, ('__does_not_exist',), {}),
                          (write, ('__does_not_exist', 5), {}),
                          ])
def test_timeout(func, args, kwargs):
    """Sync client raises TimeoutError for PVs that cannot be found."""
    with pytest.raises(TimeoutError):
        func(*args, **kwargs)
def test_subscribe_timeout():
    """Both blocking entry points (method and free function) time out
    on a PV that does not exist."""
    with pytest.raises(TimeoutError):
        sub = subscribe('__does_not_exit')
        sub.block()

    with pytest.raises(TimeoutError):
        sub = subscribe('__does_not_exit')
        block(sub)
def _subprocess_communicate(process, command, timeout=10.0):
    """Collect *process* output, dump it for debugging, and assert the
    process exited cleanly (returncode 0)."""
    out, err = process.communicate(timeout=timeout)
    dump_process_output(command, out, err)
    assert process.poll() == 0
# Cross product: each read/write call is exercised with each extra
# keyword option (repeater off, custom timeout, put-with-notify).
@pytest.mark.parametrize('more_kwargs,',
                         [{'repeater': False},
                          {'timeout': 3},
                          {'notify': True},
                          ]
                         )
@pytest.mark.parametrize('func,args,kwargs',
                         [(read, ('float',), {}),
                          (write, ('float', 3.16), {}),
                          (write, ('float', '3.16'), {'data_type': 0}),
                          ])
def test_options(func, args, kwargs, more_kwargs, ioc):
    """Smoke-test sync read/write against a live test IOC with options."""
    args = fix_arg_prefixes(ioc, args)
    kwargs.update(more_kwargs)
    func(*args, **kwargs)
@pytest.mark.parametrize('more_kwargs,',
                         [{'repeater': False},
                          {'timeout': 3},
                          ]
                         )
def test_subscribe_options(more_kwargs, ioc):
    """Smoke-test a short (0.5 s) blocking subscription with options."""
    args = ('float',)
    args = fix_arg_prefixes(ioc, args)
    sub = subscribe(*args)
    block(sub, duration=0.5, **more_kwargs)
# --format templates passed through the CLI; the doubled %% survives
# shell-less subprocess invocation and is consumed by the formatter.
fmt1 = '{response.data[0]}'
fmt2 = '{timestamp:%%H:%%M}'

# Skip the long-running ones.
# One subprocess run per (command, args) combination, covering help
# output, data-type selection, formatting, and verbosity flags.
@pytest.mark.parametrize('command,args',
                         [('caproto-get', ('-h',)),
                          ('caproto-put', ('-h',)),
                          ('caproto-monitor', ('-h',)),
                          ('caproto-get', ('--list-types',)),
                          ('caproto-get', ('float',)),
                          ('caproto-get', ('float', 'str')),
                          # data_type as int, enum name, class on type
                          ('caproto-get', ('float', '-d', '0')),
                          ('caproto-get', ('float', '-d', 'STRING')),
                          ('caproto-get', ('float', '-d', 'string')),
                          ('caproto-get', ('float', '-d', 'CONTROL')),
                          ('caproto-get', ('float', '-d', 'control')),
                          ('caproto-get', ('float', '--format', fmt1)),
                          ('caproto-get', ('float', '--format', fmt2)),
                          ('caproto-get', ('enum',)),
                          ('caproto-get', ('enum', '-n')),
                          ('caproto-get', ('float', '-n')),  # no effect
                          ('caproto-get', ('float', '--no-repeater')),
                          ('caproto-get', ('float', '-p', '0')),
                          ('caproto-get', ('float', '-p', '99')),
                          ('caproto-get', ('float', '-t')),
                          ('caproto-get', ('float', '-l')),
                          ('caproto-get', ('float', '-v')),
                          ('caproto-get', ('float', '-vvv')),
                          ('caproto-put', ('float', '3.16')),
                          ('caproto-put', ('float', '3.16', '--format', fmt1)),
                          ('caproto-put', ('float', '3.16', '--format', fmt2)),
                          ('caproto-put', ('float', '3.16', '--no-repeater')),
                          ('caproto-put', ('float', '3.16', '-p', '0')),
                          ('caproto-put', ('float', '3.16', '-p', '99')),
                          ('caproto-put', ('float', '3.16', '-t')),
                          ('caproto-put', ('float', '3.16', '-l')),
                          ('caproto-put', ('float', '3.16', '-v')),
                          ('caproto-put', ('float', '3.16', '-vvv')),
                          ])
def test_cli(command, args, ioc):
    """Run each CLI command in a subprocess and require a clean exit."""
    args = fix_arg_prefixes(ioc, args)
    # -u: unbuffered output so the captured stdout/stderr are complete.
    p = subprocess.Popen([sys.executable, '-um', 'caproto.tests.example_runner',
                          '--script', command] + list(args),
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE
                         )
    _subprocess_communicate(p, command, timeout=10.0)
@pytest.mark.parametrize('args',
                         [('float', '--format', fmt1),
                          ('float', '--format', fmt2),
                          ('float', '--format', '{timedelta}'),
                          # NOTE(review): '-m va' is passed as a single
                          # argv token (flag+value glued) — confirm the
                          # monitor CLI accepts this form.
                          ('float', '-m va'),
                          ('float', '-m valp'),
                          ('float', '-m v'),
                          ('enum',),
                          ('enum', '-n'),
                          ('float', '-n'),  # should have no effect
                          ('float', '--no-repeater'),
                          ('float', '-p', '0'),
                          ('float', '-p', '99'),
                          ('float', '-w', '2'),
                          ])
def test_monitor(args, ioc):
    """Run caproto-monitor in a subprocess and require a clean exit."""
    args = fix_arg_prefixes(ioc, args)

    if sys.platform == 'win32':
        # Windows needs a new process group so the child can be
        # interrupted independently of the test runner.
        si = subprocess.STARTUPINFO()
        si.dwFlags = (subprocess.STARTF_USESTDHANDLES |
                      subprocess.CREATE_NEW_PROCESS_GROUP)
        os_kwargs = dict(startupinfo=si)
    else:
        os_kwargs = {}

    # For the purposes of this test, one monitor output is sufficient
    args += ['--maximum', '1']

    p = subprocess.Popen([sys.executable, '-um', 'caproto.tests.example_runner',
                          '--script', 'caproto-monitor'] + list(args),
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         **os_kwargs)
    _subprocess_communicate(p, 'camonitor', timeout=2.0)
|
import os
from tempfile import mkdtemp
# Paths are resolved relative to this config file's directory.
basedir = os.path.abspath(os.path.dirname(__file__))
db_path = os.path.join(basedir, 'app', 'db')
db_file = 'production.db'
db_fullpath = os.path.join(db_path, db_file)

# SQLite database for production use.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + db_fullpath
SQLALCHEMY_TRACK_MODIFICATIONS = False  # silence SQLAlchemy's event-system overhead warning

# Server-side sessions stored on disk in a fresh temp directory
# (cleared on every process start since mkdtemp() is called at import).
SESSION_FILE_DIR = mkdtemp()
SESSION_PERMANENT = False
SESSION_TYPE = "filesystem"
|
import pymysql.cursors
from model.group import Group
from model.contact import Contact
from model.contact_in_group import Contact_in_Group
class DbFixture:
    """Read-only access layer over the addressbook MySQL database,
    used by tests to cross-check the application UI against the DB.

    Fix: local variables no longer shadow the builtins ``list`` and ``id``.
    """

    def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        # autocommit so each query sees the latest committed state
        self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True)

    def get_group_list(self):
        """Return all groups as Group models (ids stringified)."""
        groups = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
            for (group_id, group_name, header, footer) in cursor:
                groups.append(Group(id=str(group_id), name=group_name, header=header, footer=footer))
        finally:
            cursor.close()
        return groups

    def get_contact_list(self):
        """Return all non-deprecated contacts as Contact models."""
        contacts = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, firstname, lastname, email, address ,home, mobile, work, email2, email3,"
                           " phone2 from addressbook where deprecated='0000-00-00 00:00:00'")
            for (contact_id, firstname, lastname, EMail, address, home_phone, mob_phone, work_phone, EMail2,
                 EMail3, phone2) in cursor:
                contacts.append(Contact(id=str(contact_id), firstname=firstname, lastname=lastname, EMail=EMail,
                                        address=address, home_phone=home_phone, mob_phone=mob_phone,
                                        work_phone=work_phone, EMail2=EMail2, EMail3=EMail3, second_phone=phone2))
        finally:
            cursor.close()
        return contacts

    def get_contact_in_group_list(self):
        """Return (contact id, group id) membership pairs that are not deprecated."""
        memberships = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, group_id from address_in_groups where deprecated is Null")
            for (contact_id, group_id) in cursor:
                memberships.append(Contact_in_Group(id=str(contact_id), group_id=str(group_id)))
        finally:
            cursor.close()
        return memberships

    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetNamespaceResult',
    'AwaitableGetNamespaceResult',
    'get_namespace',
]

# Emitted once at import time: this whole 'latest' module is deprecated.
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventhub:getNamespace'.""", DeprecationWarning)
@pulumi.output_type
class GetNamespaceResult:
    """
    Single Namespace item in List or Get Operation

    (Generated code: each constructor argument is type-checked and
    stored via ``pulumi.set``; the matching ``@property`` getters read
    it back via ``pulumi.get``.)
    """
    def __init__(__self__, created_at=None, id=None, is_auto_inflate_enabled=None, kafka_enabled=None, location=None, maximum_throughput_units=None, metric_id=None, name=None, provisioning_state=None, service_bus_endpoint=None, sku=None, tags=None, type=None, updated_at=None):
        # Validate-and-store for every field; None values skip the type check.
        if created_at and not isinstance(created_at, str):
            raise TypeError("Expected argument 'created_at' to be a str")
        pulumi.set(__self__, "created_at", created_at)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if is_auto_inflate_enabled and not isinstance(is_auto_inflate_enabled, bool):
            raise TypeError("Expected argument 'is_auto_inflate_enabled' to be a bool")
        pulumi.set(__self__, "is_auto_inflate_enabled", is_auto_inflate_enabled)
        if kafka_enabled and not isinstance(kafka_enabled, bool):
            raise TypeError("Expected argument 'kafka_enabled' to be a bool")
        pulumi.set(__self__, "kafka_enabled", kafka_enabled)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if maximum_throughput_units and not isinstance(maximum_throughput_units, int):
            raise TypeError("Expected argument 'maximum_throughput_units' to be a int")
        pulumi.set(__self__, "maximum_throughput_units", maximum_throughput_units)
        if metric_id and not isinstance(metric_id, str):
            raise TypeError("Expected argument 'metric_id' to be a str")
        pulumi.set(__self__, "metric_id", metric_id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if service_bus_endpoint and not isinstance(service_bus_endpoint, str):
            raise TypeError("Expected argument 'service_bus_endpoint' to be a str")
        pulumi.set(__self__, "service_bus_endpoint", service_bus_endpoint)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if updated_at and not isinstance(updated_at, str):
            raise TypeError("Expected argument 'updated_at' to be a str")
        pulumi.set(__self__, "updated_at", updated_at)

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> str:
        """
        The time the Namespace was created.
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="isAutoInflateEnabled")
    def is_auto_inflate_enabled(self) -> Optional[bool]:
        """
        Value that indicates whether AutoInflate is enabled for eventhub namespace.
        """
        return pulumi.get(self, "is_auto_inflate_enabled")

    @property
    @pulumi.getter(name="kafkaEnabled")
    def kafka_enabled(self) -> Optional[bool]:
        """
        Value that indicates whether Kafka is enabled for eventhub namespace.
        """
        return pulumi.get(self, "kafka_enabled")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="maximumThroughputUnits")
    def maximum_throughput_units(self) -> Optional[int]:
        """
        Upper limit of throughput units when AutoInflate is enabled, value should be within 0 to 20 throughput units. ( '0' if AutoInflateEnabled = true)
        """
        return pulumi.get(self, "maximum_throughput_units")

    @property
    @pulumi.getter(name="metricId")
    def metric_id(self) -> str:
        """
        Identifier for Azure Insights metrics.
        """
        return pulumi.get(self, "metric_id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the Namespace.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="serviceBusEndpoint")
    def service_bus_endpoint(self) -> str:
        """
        Endpoint you can use to perform Service Bus operations.
        """
        return pulumi.get(self, "service_bus_endpoint")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        Properties of sku resource
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="updatedAt")
    def updated_at(self) -> str:
        """
        The time the Namespace was updated.
        """
        return pulumi.get(self, "updated_at")
class AwaitableGetNamespaceResult(GetNamespaceResult):
    """Awaitable wrapper so callers may ``await`` the invoke result;
    ``__await__`` resolves immediately with a plain GetNamespaceResult."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            # Never executed: the bare yield makes this a generator,
            # which is what __await__ must return.
            yield self
        return GetNamespaceResult(
            created_at=self.created_at,
            id=self.id,
            is_auto_inflate_enabled=self.is_auto_inflate_enabled,
            kafka_enabled=self.kafka_enabled,
            location=self.location,
            maximum_throughput_units=self.maximum_throughput_units,
            metric_id=self.metric_id,
            name=self.name,
            provisioning_state=self.provisioning_state,
            service_bus_endpoint=self.service_bus_endpoint,
            sku=self.sku,
            tags=self.tags,
            type=self.type,
            updated_at=self.updated_at)
def get_namespace(namespace_name: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceResult:
    """
    Single Namespace item in List or Get Operation
    Latest API Version: 2017-04-01.

    Deprecated: use the top-level module's ``getNamespace`` instead.

    :param str namespace_name: The Namespace name
    :param str resource_group_name: Name of the resource group within the azure subscription.
    """
    pulumi.log.warn("get_namespace is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventhub:getNamespace'.")
    __args__ = dict()
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the provider; the result is re-wrapped
    # in the awaitable type for async callers.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:eventhub/latest:getNamespace', __args__, opts=opts, typ=GetNamespaceResult).value

    return AwaitableGetNamespaceResult(
        created_at=__ret__.created_at,
        id=__ret__.id,
        is_auto_inflate_enabled=__ret__.is_auto_inflate_enabled,
        kafka_enabled=__ret__.kafka_enabled,
        location=__ret__.location,
        maximum_throughput_units=__ret__.maximum_throughput_units,
        metric_id=__ret__.metric_id,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        service_bus_endpoint=__ret__.service_bus_endpoint,
        sku=__ret__.sku,
        tags=__ret__.tags,
        type=__ret__.type,
        updated_at=__ret__.updated_at)
|
from pyspedas.erg.load import load
def lepe(trange=['2017-04-04', '2017-04-05'],
         datatype='omniflux',
         level='l2',
         suffix='',
         get_support_data=False,
         varformat=None,
         varnames=[],
         downloadonly=False,
         notplot=False,
         no_update=False,
         uname=None,
         passwd=None,
         time_clip=False):
    """
    This function loads data from the LEP-e experiment from the Arase mission

    Thin forwarding wrapper around ``pyspedas.erg.load`` with
    ``instrument='lepe'``.

    Parameters:
        trange : list of str
            time range of interest [starttime, endtime] with the format
            'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']

        datatype: str
            Data type; Valid options: default 'omniflux'
            (presumably also '3dflux' — TODO confirm against the
            pyspedas ERG/LEP-e documentation)

        level: str
            Data level; Valid options: default 'l2'
            (other levels per the ERG archive — TODO confirm)

        suffix: str
            The tplot variable names will be given this suffix. By default,
            no suffix is added.

        get_support_data: bool
            Data with an attribute "VAR_TYPE" with a value of "support_data"
            will be loaded into tplot. By default, only loads in data with a
            "VAR_TYPE" attribute of "data".

        varformat: str
            The file variable formats to load into tplot. Wildcard character
            "*" is accepted. By default, all variables are loaded in.

        varnames: list of str
            List of variable names to load (if not specified,
            all data variables are loaded)

        downloadonly: bool
            Set this flag to download the CDF files, but not load them into
            tplot variables

        notplot: bool
            Return the data in hash tables instead of creating tplot variables

        no_update: bool
            If set, only load data from your local cache

        time_clip: bool
            Time clip the variables to exactly the range specified in the trange keyword

    Returns:
        List of tplot variables created.
    """
    return load(instrument='lepe', trange=trange, level=level, datatype=datatype, suffix=suffix, get_support_data=get_support_data, varformat=varformat, varnames=varnames, downloadonly=downloadonly, notplot=notplot, time_clip=time_clip, no_update=no_update, uname=uname, passwd=passwd)
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
import os
# Benchmark series to plot.  The names appear to encode: numeric precision
# (fp16/fp32/tf32/mixed), tensor-core usage (tc/notc), error correction
# (cor/nocor), and emulated runs (emu) -- confirm against the benchmark
# generator.  List order fixes the plotting/legend order.
data_list = [
    'fp16_notc',
    'fp16_tc_nocor',
    'fp32_notc',
    'fp32_tc_nocor',
    'fp32_tc_cor',
    'mixed_tc_cor_emu',
    'tf32_tc_nocor_emu',
    'tf32_tc_cor_emu',
]
def get_color(d):
    """Return the hex plot color for a series name *d*.

    The match is by substring, tried in a fixed order; names that match
    nothing fall back to white.
    """
    palette = (
        ('fp32', '#51318f'),
        ('fp16', '#006c3a'),
        ('tf32', '#ed6c00'),
        ('mixed', '#333132'),
    )
    for token, color in palette:
        if token in d:
            return color
    return '#ffffff'
def get_linestyle(d):
    """Return the matplotlib line style for a series name *d*.

    Substring checks are ordered: 'notc' before 'nocor' before 'cor',
    because 'nocor' itself contains 'cor'.
    """
    for token, style in (('notc', '-'), ('nocor', '-.'), ('cor', ':')):
        if token in d:
            return style
    return '--'
fig, ((ax0, ax1)) = plt.subplots(1, 2, figsize=(6, 3))
def draw_time_graph(ax, y_label):
    """Plot one curve per entry of data_list on *ax*, log-log scaled.

    *y_label* names both the column read from performance.csv (in the
    working directory) and the y-axis caption.  Returns (line_list,
    label_list) for building a figure-level legend; line_list holds the
    list returned by each ax.plot call.
    """
    ax.grid()
    ax.set_xlabel('Matrix size $m \\times 16$ : $m$')
    ax.set_xlim([2**9, 2**26])
    # NOTE(review): `basex` was removed in matplotlib 3.5 (renamed `base`);
    # this call assumes an older matplotlib -- confirm the pinned version.
    ax.set_xscale('log', basex=2)
    ax.set_ylabel(y_label)
    ax.set_ylim(1e-4, 6.1e1)
    ax.set_yscale('log')
    ax.set_yticks([1e-4, 1e-3, 1e-2, 1e-1, 1, 10])
    ax.set_facecolor('white')
    # White backdrop rectangle kept so the transparent export stays readable.
    background = patches.Rectangle(xy=(2**9, 1e-6), width=2**26, height=1, fc='#ffffff', fill=True)
    ax.add_patch(background)
    df = pd.read_csv('performance.csv')
    line_list = []
    label_list = []
    for d in data_list:
        data = df.query("compute_mode=='" + d + "'")
        l = ax.plot(data['m'], data[y_label], linewidth=2, marker='*', markersize=4, color=get_color(d), linestyle=get_linestyle(d))
        line_list += [l]
        label_list += [d]
    return line_list, label_list
def draw_performance_graph(ax, y_label):
    """Plot one curve per entry of data_list on *ax*, log-x / linear-y.

    Same data source and return convention as draw_time_graph, but with a
    fixed linear y-range of [0, 16] (suitable for the 'tflops' column).
    """
    ax.grid()
    ax.set_xlabel('Matrix size $m \\times 16$ : $m$')
    ax.set_xlim([2**9, 2**26])
    # NOTE(review): `basex` was removed in matplotlib 3.5 (renamed `base`);
    # this call assumes an older matplotlib -- confirm the pinned version.
    ax.set_xscale('log', basex=2)
    ax.set_ylabel(y_label)
    ax.set_ylim(0, 16)
    ax.set_facecolor('white')
    # White backdrop rectangle kept so the transparent export stays readable.
    background = patches.Rectangle(xy=(2**9, 1e-6), width=2**26, height=1, fc='#ffffff', fill=True)
    ax.add_patch(background)
    df = pd.read_csv('performance.csv')
    line_list = []
    label_list = []
    for d in data_list:
        data = df.query("compute_mode=='" + d + "'")
        l = ax.plot(data['m'], data[y_label], linewidth=2, marker='*', markersize=4, color=get_color(d), linestyle=get_linestyle(d))
        line_list += [l]
        label_list += [d]
    return line_list, label_list
# Render both panels, then attach one shared legend anchored above ax0.
draw_time_graph(ax0, 'elapsed_time')
line_list, label_list = draw_performance_graph(ax1, 'tflops')
# NOTE(review): line_list contains the *lists* returned by ax.plot (one list
# of Line2D per series), not bare handles; flattening (line_list += l in the
# draw functions) would be cleaner -- verify the legend renders correctly.
fig.legend(line_list,
           labels=label_list,
           loc='upper center',
           ncol=3,
           bbox_to_anchor=(1.14, 1.4),
           bbox_transform=ax0.transAxes
           )
plt.tight_layout()
# Transparent, tightly-cropped vector output for inclusion in documents.
plt.savefig("performance.pdf", bbox_inches="tight", transparent=True)
|
from pymir.analytics.key_detection.musicnet.ml.note_sequence.base import random_forest
import argparse
import textwrap
def compute(n_estimators=100, ngram_size=None):
    """
    Base model of key detection for
    Musicnet metadata based in TF-IDF and Random Forest
    """
    # The docstring above is user-visible: run() feeds it to argparse as the
    # CLI description, so its text is left unchanged.
    #
    # ngram_size is new and optional for backward compatibility with the old
    # single-argument signature: when omitted, the underlying model keeps its
    # own default, exactly as the original call did.  This also makes
    # compute() consistent with the --ngram_size option that run() exposes.
    if ngram_size is None:
        random_forest.compute(n_estimators=n_estimators)
    else:
        random_forest.compute(n_estimators=n_estimators, ngram_size=ngram_size)
def run():
    """Command-line entry point: parse options and run the base model."""
    description = textwrap.dedent(compute.__doc__)
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--n_estimators', type=int, default=100, required=False,
        help='Number of estimators for Random Forest')
    parser.add_argument(
        '--ngram_size', type=int, default=5, required=False,
        help='Size of ngrams')
    # Unknown flags are tolerated so this can run under wrapper launchers.
    parsed, _unknown = parser.parse_known_args()
    random_forest.compute(
        n_estimators=parsed.n_estimators, ngram_size=parsed.ngram_size)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:owefsad
# software: PyCharm
# project: lingzhi-webapi
from rest_framework import serializers
from dongtai.models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializes a User plus derived department/talent summaries."""
    # Computed via get_department / get_talent below.
    department = serializers.SerializerMethodField()
    talent = serializers.SerializerMethodField()
    class Meta:
        model = User
        fields = ['id', 'username', 'email', 'is_superuser', 'phone', 'talent', 'department', 'is_active',
                  'date_joined', 'last_login']
    def get_department(self, obj):
        """Return the user's first department as {'name', 'id'};
        falls back to {'name': '', 'id': -1} when the user has none."""
        department = obj.department.filter().first()
        return {'name': department.get_department_name(), 'id': department.id} if department else {'name': '', 'id': -1}
    def get_talent(self, obj):
        """Return the user's talent name, or '' when no talent is set."""
        talent = obj.get_talent()
        return talent.get_talent_name() if talent else ''
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import imagekit.models.fields
import users.models
class Migration(migrations.Migration):
    """Switch Profile.avatar to an imagekit ProcessedImageField whose upload
    path is produced by users.models.generate_avatar_filename."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='avatar',
            field=imagekit.models.fields.ProcessedImageField(upload_to=users.models.generate_avatar_filename),
        ),
    ]
|
# This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <c.amsuess@energyharvesting.at>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
import functools
class AwaitOrAenter:
    """Make a coroutine usable both ways: ``await c`` directly (possibly
    entering the result as an async context manager later), or
    ``async with c as ...:`` without the intermediate ``await``."""
    def __init__(self, coro):
        self.__wrapped = coro
    def __await__(self):
        # Delegate straight to the wrapped coroutine's await protocol.
        return self.__wrapped.__await__()
    async def __aenter__(self):
        # Run the coroutine, remember its result, and enter it.
        manager = await self.__wrapped
        self.__manager = manager
        return await manager.__aenter__()
    async def __aexit__(self, exc_type, exc_value, traceback):
        return await self.__manager.__aexit__(exc_type, exc_value, traceback)
    @classmethod
    def decorate(cls, coroutine):
        """Decorator form: wrap every invocation of *coroutine*."""
        @functools.wraps(coroutine)
        def decorated(*args, **kwargs):
            return cls(coroutine(*args, **kwargs))
        return decorated
|
import glob
import inspect
import os
import re
import warnings
from collections import namedtuple
from stat import ST_CTIME
import numpy as np
from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch
from yt.data_objects.static_output import Dataset
from yt.funcs import ensure_tuple, mylog, setdefaultattr
from yt.geometry.grid_geometry_handler import GridIndex
from yt.utilities.io_handler import io_registry
from yt.utilities.lib.misc_utilities import get_box_grids_level
from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only
from .fields import (
BoxlibFieldInfo,
CastroFieldInfo,
MaestroFieldInfo,
NyxFieldInfo,
WarpXFieldInfo,
)
# This is what we use to find scientific notation that might include d's
# instead of e's (Fortran-style literals such as 1.d-2).
_scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
# This is the dimensions in the Cell_H file for each level
# It is different for different dimensionalities, so we make a list
# indexed by (dimensionality - 1); the two groups capture the lo and hi
# integer corners of each box.
_dim_finder = [
    re.compile(r"\(\((\d+)\) \((\d+)\) \(\d+\)\)$"),
    re.compile(r"\(\((\d+,\d+)\) \((\d+,\d+)\) \(\d+,\d+\)\)$"),
    re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$"),
]
# This is the line that prefixes each set of data for a FAB in the FAB file
# It is different for different dimensionalities, so we make a list
# (_endian_regex's two groups capture bytes-per-real and the byte ordering).
_endian_regex = r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), \(([0-9 ]+)\)\)\)"
_header_pattern = [
    re.compile(_endian_regex + r"\(\((\d+)\) \((\d+)\) \((\d+)\)\) (\d+)\n"),
    re.compile(
        _endian_regex + r"\(\((\d+,\d+)\) \((\d+,\d+)\) \((\d+,\d+)\)\) (\d+)\n"
    ),
    re.compile(
        _endian_regex
        + r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n"
    ),
]
class BoxlibGrid(AMRGridPatch):
    """A single AMR grid patch read from a BoxLib/AMReX plotfile."""
    _id_offset = 0
    # Byte offset of this grid's FAB payload; -1 means "not yet resolved"
    # (see _get_offset).
    _offset = -1
    def __init__(self, grid_id, offset, filename=None, index=None):
        super(BoxlibGrid, self).__init__(grid_id, filename, index)
        self._base_offset = offset
        self._parent_id = []
        self._children_ids = []
        # Per-particle-directory metadata, filled by the hierarchy's
        # _read_particles (filename, offset, particle counts).
        self._pdata = {}
    def _prepare_grid(self):
        """Copy index-level information onto this grid, then record the
        grid's integer start index from the hierarchy arrays."""
        super(BoxlibGrid, self)._prepare_grid()
        my_ind = self.id - self._id_offset
        self.start_index = self.index.grid_start_index[my_ind]
    def get_global_startindex(self):
        """Return this grid's integer start index within its level."""
        return self.start_index
    def _setup_dx(self):
        # has already been read in and stored in index
        self.dds = self.index.ds.arr(self.index.level_dds[self.Level, :], "code_length")
        self.field_data["dx"], self.field_data["dy"], self.field_data["dz"] = self.dds
    def __repr__(self):
        return "BoxlibGrid_%04i" % (self.id)
    @property
    def Parent(self):
        """List of parent grids, or None for a grid with no parents."""
        if len(self._parent_id) == 0:
            return None
        return [self.index.grids[pid - self._id_offset] for pid in self._parent_id]
    @property
    def Children(self):
        """List of child grids (possibly empty)."""
        return [self.index.grids[cid - self._id_offset] for cid in self._children_ids]
    def _get_offset(self, f):
        # This will either seek to the _offset or figure out the correct
        # _offset.
        # The data payload starts on the line after the FAB header, so we
        # skip one line past the base offset and cache the position.
        if self._offset == -1:
            f.seek(self._base_offset, os.SEEK_SET)
            f.readline()
            self._offset = f.tell()
        return self._offset
    # We override here because we can have varying refinement levels
    def select_ires(self, dobj):
        """Return per-cell resolution indices (level + level offset) for the
        cells selected by *dobj*."""
        mask = self._get_selector_mask(dobj.selector)
        if mask is None:
            return np.empty(0, dtype="int64")
        coords = np.empty(self._last_count, dtype="int64")
        coords[:] = self.Level + self.ds.level_offsets[self.Level]
        return coords
    # Override this as well, since refine_by can vary
    def _fill_child_mask(self, child, mask, tofill, dlevel=1):
        """Set the region of *mask* covered by *child* to *tofill*, using the
        per-level refinement factor rather than a fixed refine_by."""
        rf = self.ds.ref_factors[self.Level]
        if dlevel != 1:
            raise NotImplementedError
        gi, cgi = self.get_global_startindex(), child.get_global_startindex()
        startIndex = np.maximum(0, cgi // rf - gi)
        endIndex = np.minimum(
            (cgi + child.ActiveDimensions) // rf - gi, self.ActiveDimensions
        )
        # Guarantee a non-empty slab when start == end on some axis.
        endIndex += startIndex == endIndex
        mask[
            startIndex[0] : endIndex[0],
            startIndex[1] : endIndex[1],
            startIndex[2] : endIndex[2],
        ] = tofill
class BoxLibParticleHeader:
    """Parser for the legacy (pre-"Version_Two") BoxLib particle Header.

    Reads particle counts, base/extra field layout, and the per-level,
    per-grid data map used to locate particle records on disk.
    """
    def __init__(self, ds, directory_name, is_checkpoint, extra_field_names=None):
        # The particle directory name doubles as the particle type name.
        self.particle_type = directory_name
        header_filename = os.path.join(ds.output_dir, directory_name, "Header")
        with open(header_filename, "r") as f:
            # e.g. "Version_One_Dot_Zero_double"; last token is the real type.
            self.version_string = f.readline().strip()
            particle_real_type = self.version_string.split("_")[-1]
            known_real_types = {"double": np.float64, "single": np.float32}
            try:
                self.real_type = known_real_types[particle_real_type]
            except KeyError:
                warnings.warn(
                    f"yt did not recognize particle real type {particle_real_type}"
                    "assuming double",
                    category=RuntimeWarning,
                )
                self.real_type = known_real_types["double"]
            self.int_type = np.int32
            # Spatial dimensionality (1, 2 or 3).
            self.dim = int(f.readline().strip())
            # Base fields: id + cpu + one cell index per dimension (ints),
            # and one position component per dimension (reals).
            self.num_int_base = 2 + self.dim
            self.num_real_base = self.dim
            self.num_int_extra = 0  # this should be written by Boxlib, but isn't
            self.num_real_extra = int(f.readline().strip())
            self.num_int = self.num_int_base + self.num_int_extra
            self.num_real = self.num_real_base + self.num_real_extra
            self.num_particles = int(f.readline().strip())
            self.max_next_id = int(f.readline().strip())
            self.finest_level = int(f.readline().strip())
            self.num_levels = self.finest_level + 1
            # Boxlib particles can be written in checkpoint or plotfile mode
            # The base integer fields are only there for checkpoints, but some
            # codes use the checkpoint format for plotting
            if not is_checkpoint:
                self.num_int_base = 0
                self.num_int_extra = 0
                self.num_int = 0
            self.grids_per_level = np.zeros(self.num_levels, dtype="int64")
            self.data_map = {}
            for level_num in range(self.num_levels):
                self.grids_per_level[level_num] = int(f.readline().strip())
                self.data_map[level_num] = {}
            # Each grid's record: which data file, how many particles, and
            # the byte offset of its block within that file.
            pfd = namedtuple(
                "ParticleFileDescriptor", ["file_number", "num_particles", "offset"]
            )
            for level_num in range(self.num_levels):
                for grid_num in range(self.grids_per_level[level_num]):
                    entry = [int(val) for val in f.readline().strip().split()]
                    self.data_map[level_num][grid_num] = pfd(*entry)
        self._generate_particle_fields(extra_field_names)
    def _generate_particle_fields(self, extra_field_names):
        """Build the known int/real field lists and the matching numpy
        structured dtypes used when reading particle data."""
        # these are the 'base' integer fields
        self.known_int_fields = [
            (self.particle_type, "particle_id"),
            (self.particle_type, "particle_cpu"),
            (self.particle_type, "particle_cell_x"),
            (self.particle_type, "particle_cell_y"),
            (self.particle_type, "particle_cell_z"),
        ]
        # Truncate to the actual count (0 for plotfiles, 2+dim otherwise).
        self.known_int_fields = self.known_int_fields[0 : self.num_int_base]
        # these are extra integer fields
        extra_int_fields = [
            "particle_int_comp%d" % i for i in range(self.num_int_extra)
        ]
        self.known_int_fields.extend(
            [(self.particle_type, field) for field in extra_int_fields]
        )
        # these are the base real fields
        self.known_real_fields = [
            (self.particle_type, "particle_position_x"),
            (self.particle_type, "particle_position_y"),
            (self.particle_type, "particle_position_z"),
        ]
        # Truncate to the simulation's dimensionality.
        self.known_real_fields = self.known_real_fields[0 : self.num_real_base]
        # these are the extras
        if extra_field_names is not None:
            assert len(extra_field_names) == self.num_real_extra
        else:
            extra_field_names = [
                "particle_real_comp%d" % i for i in range(self.num_real_extra)
            ]
        self.known_real_fields.extend(
            [(self.particle_type, field) for field in extra_field_names]
        )
        self.known_fields = self.known_int_fields + self.known_real_fields
        self.particle_int_dtype = np.dtype(
            [(t[1], self.int_type) for t in self.known_int_fields]
        )
        self.particle_real_dtype = np.dtype(
            [(t[1], self.real_type) for t in self.known_real_fields]
        )
class AMReXParticleHeader:
    """Parser for the modern AMReX ("Version_Two*") particle Header.

    Unlike BoxLibParticleHeader, the file itself names each extra real/int
    component and carries its own checkpoint flag.
    """
    def __init__(self, ds, directory_name, is_checkpoint, extra_field_names=None):
        # NOTE(review): the is_checkpoint and extra_field_names arguments are
        # unused here; the checkpoint flag and component names are read from
        # the header file itself.  Kept for signature parity with
        # BoxLibParticleHeader.
        self.particle_type = directory_name
        header_filename = os.path.join(ds.output_dir, directory_name, "Header")
        self.real_component_names = []
        self.int_component_names = []
        with open(header_filename, "r") as f:
            # Last token of the version string encodes the real precision.
            self.version_string = f.readline().strip()
            particle_real_type = self.version_string.split("_")[-1]
            if particle_real_type == "double":
                self.real_type = np.float64
            elif particle_real_type == "single":
                self.real_type = np.float32
            else:
                raise RuntimeError("yt did not recognize particle real type.")
            self.int_type = np.int32
            # Spatial dimensionality; positions contribute this many reals.
            self.dim = int(f.readline().strip())
            self.num_int_base = 2
            self.num_real_base = self.dim
            self.num_real_extra = int(f.readline().strip())
            for _ in range(self.num_real_extra):
                self.real_component_names.append(f.readline().strip())
            self.num_int_extra = int(f.readline().strip())
            for _ in range(self.num_int_extra):
                self.int_component_names.append(f.readline().strip())
            self.num_int = self.num_int_base + self.num_int_extra
            self.num_real = self.num_real_base + self.num_real_extra
            self.is_checkpoint = bool(int(f.readline().strip()))
            self.num_particles = int(f.readline().strip())
            self.max_next_id = int(f.readline().strip())
            self.finest_level = int(f.readline().strip())
            self.num_levels = self.finest_level + 1
            # Base integer fields (id/cpu) only exist in checkpoint output.
            if not self.is_checkpoint:
                self.num_int_base = 0
                self.num_int_extra = 0
                self.num_int = 0
            self.grids_per_level = np.zeros(self.num_levels, dtype="int64")
            self.data_map = {}
            for level_num in range(self.num_levels):
                self.grids_per_level[level_num] = int(f.readline().strip())
                self.data_map[level_num] = {}
            # Each grid's record: data file number, particle count, offset.
            pfd = namedtuple(
                "ParticleFileDescriptor", ["file_number", "num_particles", "offset"]
            )
            for level_num in range(self.num_levels):
                for grid_num in range(self.grids_per_level[level_num]):
                    entry = [int(val) for val in f.readline().strip().split()]
                    self.data_map[level_num][grid_num] = pfd(*entry)
        self._generate_particle_fields()
    def _generate_particle_fields(self):
        """Build the known int/real field lists and the matching numpy
        structured dtypes used when reading particle data."""
        # these are the 'base' integer fields
        self.known_int_fields = [
            (self.particle_type, "particle_id"),
            (self.particle_type, "particle_cpu"),
        ]
        # Truncate to the actual count (0 for plotfile-mode output).
        self.known_int_fields = self.known_int_fields[0 : self.num_int_base]
        self.known_int_fields.extend(
            [
                (self.particle_type, "particle_" + field)
                for field in self.int_component_names
            ]
        )
        # these are the base real fields
        self.known_real_fields = [
            (self.particle_type, "particle_position_x"),
            (self.particle_type, "particle_position_y"),
            (self.particle_type, "particle_position_z"),
        ]
        # Truncate to the simulation's dimensionality.
        self.known_real_fields = self.known_real_fields[0 : self.num_real_base]
        self.known_real_fields.extend(
            [
                (self.particle_type, "particle_" + field)
                for field in self.real_component_names
            ]
        )
        self.known_fields = self.known_int_fields + self.known_real_fields
        self.particle_int_dtype = np.dtype(
            [(t[1], self.int_type) for t in self.known_int_fields]
        )
        self.particle_real_dtype = np.dtype(
            [(t[1], self.real_type) for t in self.known_real_fields]
        )
class BoxlibHierarchy(GridIndex):
    """Grid index for BoxLib/AMReX plotfiles.

    Parses the plotfile ``Header`` (and the per-level ``Cell_H`` files) to
    build the grid hierarchy, and can attach particle metadata read from
    particle directories.
    """

    grid = BoxlibGrid

    def __init__(self, ds, dataset_type="boxlib_native"):
        self.dataset_type = dataset_type
        self.header_filename = os.path.join(ds.output_dir, "Header")
        self.directory = ds.output_dir
        self.particle_headers = {}
        GridIndex.__init__(self, ds, dataset_type)
        # Any grid works for sniffing the FAB byte order; use the last one.
        self._cache_endianness(self.grids[-1])

    def _parse_index(self):
        """
        read the global header file for an Boxlib plotfile output.
        """
        self.max_level = self.dataset._max_level
        header_file = open(self.header_filename, "r")
        self.dimensionality = self.dataset.dimensionality
        _our_dim_finder = _dim_finder[self.dimensionality - 1]
        DRE = self.dataset.domain_right_edge  # shortcut
        DLE = self.dataset.domain_left_edge  # shortcut
        # We can now skip to the point in the file we want to start parsing.
        header_file.seek(self.dataset._header_mesh_start)
        dx = []
        for i in range(self.max_level + 1):
            dx.append([float(v) for v in next(header_file).split()])
            # account for non-3d data sets
            if self.dimensionality < 2:
                dx[i].append(DRE[1] - DLE[1])
            if self.dimensionality < 3:
                # BUGFIX: pad the z spacing with the full z extent; this
                # previously used DLE[1] (the y component) by mistake.
                dx[i].append(DRE[2] - DLE[2])
        self.level_dds = np.array(dx, dtype="float64")
        next(header_file)
        if self.ds.geometry == "cartesian":
            default_ybounds = (0.0, 1.0)
            default_zbounds = (0.0, 1.0)
        elif self.ds.geometry == "cylindrical":
            # Now we check for dimensionality issues
            if self.dimensionality != 2:
                raise RuntimeError("yt needs cylindrical to be 2D")
            self.level_dds[:, 2] = 2 * np.pi
            default_zbounds = (0.0, 2 * np.pi)
        elif self.ds.geometry == "spherical":
            # BoxLib only supports 1D spherical, so ensure
            # the other dimensions have the right extent.
            self.level_dds[:, 1] = np.pi
            self.level_dds[:, 2] = 2 * np.pi
            default_ybounds = (0.0, np.pi)
            default_zbounds = (0.0, 2 * np.pi)
        else:
            raise RuntimeError("Unknown BoxLib coordinate system.")
        if int(next(header_file)) != 0:
            raise RuntimeError("INTERNAL ERROR! This should be a zero.")
        # each level is one group with ngrids on it.
        # each grid has self.dimensionality number of lines of 2 reals
        self.grids = []
        grid_counter = 0
        for level in range(self.max_level + 1):
            vals = next(header_file).split()
            lev, ngrids = int(vals[0]), int(vals[1])
            assert lev == level
            nsteps = int(next(header_file))  # NOQA
            for gi in range(ngrids):
                xlo, xhi = [float(v) for v in next(header_file).split()]
                if self.dimensionality > 1:
                    ylo, yhi = [float(v) for v in next(header_file).split()]
                else:
                    ylo, yhi = default_ybounds
                if self.dimensionality > 2:
                    zlo, zhi = [float(v) for v in next(header_file).split()]
                else:
                    zlo, zhi = default_zbounds
                self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
                self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
            # Now we get to the level header filename, which we open and parse.
            fn = os.path.join(self.dataset.output_dir, next(header_file).strip())
            level_header_file = open(fn + "_H")
            level_dir = os.path.dirname(fn)
            # We skip the first two lines, which contain BoxLib header file
            # version and 'how' the data was written
            next(level_header_file)
            next(level_header_file)
            # Now we get the number of components
            ncomp_this_file = int(next(level_header_file))  # NOQA
            # Skip the next line, which contains the number of ghost zones
            next(level_header_file)
            # To decipher this next line, we expect something like:
            # (8 0
            # where the first is the number of FABs in this level.
            ngrids = int(next(level_header_file).split()[0][1:])
            # Now we can iterate over each and get the indices.
            for gi in range(ngrids):
                # components within it
                start, stop = _our_dim_finder.match(next(level_header_file)).groups()
                # fix for non-3d data
                # note we append '0' to both ends b/c of the '+1' in dims below
                start += ",0" * (3 - self.dimensionality)
                stop += ",0" * (3 - self.dimensionality)
                start = np.array(start.split(","), dtype="int64")
                stop = np.array(stop.split(","), dtype="int64")
                dims = stop - start + 1
                self.grid_dimensions[grid_counter + gi, :] = dims
                self.grid_start_index[grid_counter + gi, :] = start
            # Now we read two more lines.  The first of these is a close
            # parenthesis.
            next(level_header_file)
            # The next is again the number of grids
            next(level_header_file)
            # Now we iterate over grids to find their offsets in each file.
            for gi in range(ngrids):
                # Now we get the data file, at which point we're ready to
                # create the grid.
                dummy, filename, offset = next(level_header_file).split()
                filename = os.path.join(level_dir, filename)
                go = self.grid(grid_counter + gi, int(offset), filename, self)
                go.Level = self.grid_levels[grid_counter + gi, :] = level
                self.grids.append(go)
            # BUGFIX: close the per-level header; this previously leaked one
            # file handle per level.
            level_header_file.close()
            grid_counter += ngrids
            # already read the filenames above...
        # BUGFIX: the global header file was previously never closed.
        header_file.close()
        self.float_type = "float64"

    def _cache_endianness(self, test_grid):
        """
        Cache the endianness and bytes per real of the grids by using a
        test grid and assuming that all grids have the same
        endianness. This is a pretty safe assumption since Boxlib uses
        one file per processor, and if you're running on a cluster
        with different endian processors, then you're on your own!
        """
        # open the test file & grab the header
        with open(os.path.expanduser(test_grid.filename), "rb") as f:
            header = f.readline().decode("ascii", "ignore")
        bpr, endian, start, stop, centering, nc = (
            _header_pattern[self.dimensionality - 1].search(header).groups()
        )
        # Note that previously we were using a different value for BPR than we
        # use now.  Here is an example set of information directly from BoxLib
        """
        * DOUBLE data
        * FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27  # NOQA: E501
        * FLOAT data
        * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27
        """
        # The byte-order string lists bytes 1..bpr; ascending means little
        # endian, descending means big endian.
        if bpr == endian[0]:
            dtype = f"<f{bpr}"
        elif bpr == endian[-1]:
            dtype = f">f{bpr}"
        else:
            raise ValueError(
                "FAB header is neither big nor little endian. "
                "Perhaps the file is corrupt?"
            )
        mylog.debug("FAB header suggests dtype of %s", dtype)
        self._dtype = np.dtype(dtype)

    def _populate_grid_objects(self):
        """Finalize grids: rebuild parent/child links, then set per-grid
        start indices and cell spacings."""
        mylog.debug("Creating grid objects")
        self.grids = np.array(self.grids, dtype="object")
        self._reconstruct_parent_child()
        for i, grid in enumerate(self.grids):
            if (i % 1e4) == 0:
                mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
            grid._prepare_grid()
            grid._setup_dx()
        mylog.debug("Done creating grid objects")

    def _reconstruct_parent_child(self):
        """Rebuild parent/child links by geometric overlap between adjacent
        levels; no-op for single-level datasets."""
        if self.max_level == 0:
            return
        mask = np.empty(len(self.grids), dtype="int32")
        mylog.debug("First pass; identifying child grids")
        for i, grid in enumerate(self.grids):
            get_box_grids_level(
                self.grid_left_edge[i, :],
                self.grid_right_edge[i, :],
                self.grid_levels[i] + 1,
                self.grid_left_edge,
                self.grid_right_edge,
                self.grid_levels,
                mask,
            )
            ids = np.where(mask.astype("bool"))  # where is a tuple
            grid._children_ids = ids[0] + grid._id_offset
        mylog.debug("Second pass; identifying parents")
        for i, grid in enumerate(self.grids):  # Second pass
            for child in grid.Children:
                child._parent_id.append(i + grid._id_offset)

    def _count_grids(self):
        # We can get everything from the Header file, but note that we're
        # duplicating some work done elsewhere.  In a future where we don't
        # pre-allocate grid arrays, this becomes unnecessary.
        header_file = open(self.header_filename, "r")
        header_file.seek(self.dataset._header_mesh_start)
        # Skip over the level dxs, geometry and the zero:
        [next(header_file) for i in range(self.dataset._max_level + 3)]
        # Now we need to be very careful, as we've seeked, and now we iterate.
        # Does this work?  We are going to count the number of places that we
        # have a three-item line.  The three items would be level, number of
        # grids, and then grid time.
        self.num_grids = 0
        for line in header_file:
            if len(line.split()) != 3:
                continue
            self.num_grids += int(line.split()[1])
        # BUGFIX: the header file was previously never closed.
        header_file.close()

    def _initialize_grid_arrays(self):
        """Extend the base arrays with per-grid integer start indices."""
        super(BoxlibHierarchy, self)._initialize_grid_arrays()
        self.grid_start_index = np.zeros((self.num_grids, 3), "int64")

    def _initialize_state_variables(self):
        """override to not re-initialize num_grids in AMRHierarchy.__init__
        """
        self._parallel_locking = False
        self._data_file = None
        self._data_mode = None

    def _detect_output_fields(self):
        """Expose the mesh fields discovered while parsing the header."""
        # This is all done in _parse_header_file
        self.field_list = [("boxlib", f) for f in self.dataset._field_list]
        self.field_indexes = dict((f[1], i) for i, f in enumerate(self.field_list))
        # There are times when field_list may change.  We copy it here to
        # avoid that possibility.
        self.field_order = [f for f in self.field_list]

    def _setup_data_io(self):
        self.io = io_registry[self.dataset_type](self.dataset)

    def _determine_particle_output_type(self, directory_name):
        """Return the particle-header parser class matching the version
        string found in the particle directory's Header file."""
        # Consistency: build the path with os.path.join like the rest of
        # this file, rather than by string concatenation.
        header_filename = os.path.join(self.ds.output_dir, directory_name, "Header")
        with open(header_filename, "r") as f:
            version_string = f.readline().strip()
            if version_string.startswith("Version_Two"):
                return AMReXParticleHeader
            else:
                return BoxLibParticleHeader

    def _read_particles(self, directory_name, is_checkpoint, extra_field_names=None):
        """Parse a particle directory's header, attach per-grid particle
        metadata (data file, offset, counts), and extend field_list."""
        pheader = self._determine_particle_output_type(directory_name)
        self.particle_headers[directory_name] = pheader(
            self.ds, directory_name, is_checkpoint, extra_field_names
        )
        num_parts = self.particle_headers[directory_name].num_particles
        if self.ds._particle_type_counts is None:
            self.ds._particle_type_counts = {}
        self.ds._particle_type_counts[directory_name] = num_parts
        base = os.path.join(self.ds.output_dir, directory_name)
        # Particle data files are zero-padded to either 4 or 5 digits.
        if len(glob.glob(os.path.join(base, "Level_?", "DATA_????"))) > 0:
            base_particle_fn = os.path.join(base, "Level_%d", "DATA_%.4d")
        elif len(glob.glob(os.path.join(base, "Level_?", "DATA_?????"))) > 0:
            base_particle_fn = os.path.join(base, "Level_%d", "DATA_%.5d")
        else:
            return
        gid = 0
        for lev, data in self.particle_headers[directory_name].data_map.items():
            for pdf in data.values():
                pdict = self.grids[gid]._pdata
                pdict[directory_name] = {}
                pdict[directory_name]["particle_filename"] = base_particle_fn % (
                    lev,
                    pdf.file_number,
                )
                pdict[directory_name]["offset"] = pdf.offset
                pdict[directory_name]["NumberOfParticles"] = pdf.num_particles
                self.grid_particle_count[gid] += pdf.num_particles
                self.grids[gid].NumberOfParticles += pdf.num_particles
                gid += 1
        # add particle fields to field_list
        pfield_list = self.particle_headers[directory_name].known_fields
        self.field_list.extend(pfield_list)
class BoxlibDataset(Dataset):
"""
This class is a stripped down class that simply reads and parses
*filename*, without looking at the Boxlib index.
"""
_index_class = BoxlibHierarchy
_field_info_class = BoxlibFieldInfo
_output_prefix = None
# THIS SHOULD BE FIXED:
periodicity = (True, True, True)
def __init__(
self,
output_dir,
cparam_filename="job_info", # todo: harmonise this default value with docstring
fparam_filename=None,
dataset_type="boxlib_native",
storage_filename=None,
units_override=None,
unit_system="cgs",
):
"""
The paramfile is usually called "inputs"
and there may be a fortran inputs file usually called "probin"
plotname here will be a directory name
as per BoxLib, dataset_type will be Native (implemented here), IEEE (not
yet implemented) or ASCII (not yet implemented.)
"""
self.fluid_types += ("boxlib",)
self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
self.cparam_filename = self._lookup_cparam_filepath(
output_dir, cparam_filename=cparam_filename
)
self.fparam_filename = self._localize_check(fparam_filename)
self.storage_filename = storage_filename
Dataset.__init__(
self,
output_dir,
dataset_type,
units_override=units_override,
unit_system=unit_system,
)
# These are still used in a few places.
if "HydroMethod" not in self.parameters.keys():
self.parameters["HydroMethod"] = "boxlib"
self.parameters["Time"] = 1.0 # default unit is 1...
self.parameters["EOSType"] = -1 # default
self.parameters["gamma"] = self.parameters.get("materials.gamma", 1.6667)
def _localize_check(self, fn):
if fn is None:
return None
# If the file exists, use it. If not, set it to None.
root_dir = os.path.dirname(self.output_dir)
full_fn = os.path.join(root_dir, fn)
if os.path.exists(full_fn):
return full_fn
return None
@classmethod
def _is_valid(cls, *args, **kwargs):
# fill our args
output_dir = args[0]
header_filename = os.path.join(output_dir, "Header")
# boxlib datasets are always directories, and
# We *know* it's not boxlib if Header doesn't exist.
return os.path.exists(header_filename)
@classmethod
def _lookup_cparam_filepath(cls, *args, **kwargs):
output_dir = args[0]
iargs = inspect.getcallargs(cls.__init__, args, kwargs)
lookup_table = [
os.path.abspath(os.path.join(p, iargs["cparam_filename"]))
for p in (output_dir, os.path.dirname(output_dir))
]
found = [os.path.exists(file) for file in lookup_table]
if not any(found):
return None
return lookup_table[found.index(True)]
@classmethod
def _is_valid_subtype(cls, *args, **kwargs):
# this is used by derived classes
output_dir = args[0]
if not BoxlibDataset._is_valid(output_dir):
return False
cparam_filepath = cls._lookup_cparam_filepath(*args, **kwargs)
if cparam_filepath is None:
return False
lines = [line.lower() for line in open(cparam_filepath).readlines()]
return any(cls._subtype_keyword in line for line in lines)
def _parse_parameter_file(self):
"""
Parses the parameter file and establishes the various
dictionaries.
"""
self._parse_header_file()
# Let's read the file
hfn = os.path.join(self.output_dir, "Header")
self.unique_identifier = int(os.stat(hfn)[ST_CTIME])
# the 'inputs' file is now optional
self._parse_cparams()
self._parse_fparams()
def _parse_cparams(self):
if self.cparam_filename is None:
return
for line in (line.split("#")[0].strip() for line in open(self.cparam_filename)):
try:
param, vals = [s.strip() for s in line.split("=")]
except ValueError:
continue
if param == "amr.n_cell":
vals = self.domain_dimensions = np.array(vals.split(), dtype="int32")
# For 1D and 2D simulations in BoxLib usually only the relevant
# dimensions have a specified number of zones, but yt requires
# domain_dimensions to have three elements, with 1 in the additional
# slots if we're not in 3D, so append them as necessary.
if len(vals) == 1:
vals = self.domain_dimensions = np.array([vals[0], 1, 1])
elif len(vals) == 2:
vals = self.domain_dimensions = np.array([vals[0], vals[1], 1])
elif param == "amr.ref_ratio":
vals = self.refine_by = int(vals[0])
elif param == "Prob.lo_bc":
vals = self.periodicity = ensure_tuple([i == 0 for i in vals.split()])
elif param == "castro.use_comoving":
vals = self.cosmological_simulation = int(vals)
else:
try:
vals = _guess_pcast(vals)
except IndexError:
# hitting an empty string
vals = None
self.parameters[param] = vals
if getattr(self, "cosmological_simulation", 0) == 1:
self.omega_lambda = self.parameters["comoving_OmL"]
self.omega_matter = self.parameters["comoving_OmM"]
self.hubble_constant = self.parameters["comoving_h"]
a_file = open(os.path.join(self.output_dir, "comoving_a"))
line = a_file.readline().strip()
a_file.close()
self.current_redshift = 1 / float(line) - 1
else:
self.current_redshift = (
self.omega_lambda
) = (
self.omega_matter
) = self.hubble_constant = self.cosmological_simulation = 0.0
def _parse_fparams(self):
"""
Parses the fortran parameter file for Orion. Most of this will
be useless, but this is where it keeps mu = mass per
particle/m_hydrogen.
"""
if self.fparam_filename is None:
return
for line in (l for l in open(self.fparam_filename) if "=" in l):
param, vals = [v.strip() for v in line.split("=")]
# Now, there are a couple different types of parameters.
# Some will be where you only have floating point values, others
# will be where things are specified as string literals.
# Unfortunately, we're also using Fortran values, which will have
# things like 1.d-2 which is pathologically difficult to parse if
# your C library doesn't include 'd' in its locale for strtod.
# So we'll try to determine this.
vals = vals.split()
if any(_scinot_finder.match(v) for v in vals):
vals = [float(v.replace("D", "e").replace("d", "e")) for v in vals]
if len(vals) == 1:
vals = vals[0]
self.parameters[param] = vals
    def _parse_header_file(self):
        """
        We parse the Boxlib header, which we use as our basis. Anything in the
        inputs file will override this, but the inputs file is not strictly
        necessary for orientation of the data in space.

        Sets (among others): ``orion_version``, ``_field_list``,
        ``dimensionality``, ``current_time``, ``_max_level``, domain edges
        and dimensions, ``refine_by``/``level_offsets`` and ``geometry``.
        """
        # Note: Python uses a read-ahead buffer, so using next(), which would
        # be my preferred solution, won't work here. We have to explicitly
        # call readline() if we want to end up with an offset at the very end.
        # Fortunately, elsewhere we don't care about the offset, so we're fine
        # everywhere else using iteration exclusively.
        header_file = open(os.path.join(self.output_dir, "Header"))
        self.orion_version = header_file.readline().rstrip()
        n_fields = int(header_file.readline())
        # One field name per line follows the field count.
        self._field_list = [header_file.readline().strip() for i in range(n_fields)]
        self.dimensionality = int(header_file.readline())
        self.current_time = float(header_file.readline())
        # This is traditionally a index attribute, so we will set it, but
        # in a slightly hidden variable.
        self._max_level = int(header_file.readline())
        self.domain_left_edge = np.array(
            header_file.readline().split(), dtype="float64"
        )
        self.domain_right_edge = np.array(
            header_file.readline().split(), dtype="float64"
        )
        # One refinement factor per level transition; may be empty.
        ref_factors = np.array([int(i) for i in header_file.readline().split()])
        if ref_factors.size == 0:
            # We use a default of two, as Nyx doesn't always output this value
            ref_factors = [2] * (self._max_level + 1)
        # We can't vary refinement factors based on dimension, or whatever else
        # they are varied on. In one curious thing, I found that some Castro 3D
        # data has only two refinement factors, which I don't know how to
        # understand.
        self.ref_factors = ref_factors
        if np.unique(ref_factors).size > 1:
            # We want everything to be a multiple of this.
            self.refine_by = min(ref_factors)
            # Check that they're all multiples of the minimum.
            if not all(
                float(rf) / self.refine_by == int(float(rf) / self.refine_by)
                for rf in ref_factors
            ):
                raise RuntimeError
            # level_offsets translate "file levels" into effective levels in
            # powers of refine_by (see relative_refinement).
            base_log = np.log2(self.refine_by)
            self.level_offsets = [0]  # level 0 has to have 0 offset
            lo = 0
            for rf in self.ref_factors:
                lo += int(np.log2(rf) / base_log) - 1
                self.level_offsets.append(lo)
        # assert(np.unique(ref_factors).size == 1)
        else:
            self.refine_by = ref_factors[0]
            self.level_offsets = [0 for l in range(self._max_level + 1)]
        # Now we read the global index space, to get
        index_space = header_file.readline()
        # This will be of the form:
        # ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
        # So note that if we split it all up based on spaces, we should be
        # fine, as long as we take the first two entries, which correspond to
        # the root level. I'm not 100% pleased with this solution.
        root_space = index_space.replace("(", "").replace(")", "").split()[:2]
        start = np.array(root_space[0].split(","), dtype="int64")
        stop = np.array(root_space[1].split(","), dtype="int64")
        # Inclusive index range -> cell count.
        self.domain_dimensions = stop - start + 1
        # Skip timesteps per level
        header_file.readline()
        # Remember the file offset where the per-level mesh info starts, for
        # later consumers (hence readline() instead of iteration above).
        self._header_mesh_start = header_file.tell()
        # Skip the cell size information per level - we'll get this later
        for _ in range(self._max_level + 1):
            header_file.readline()
        # Get the geometry
        next_line = header_file.readline()
        if len(next_line.split()) == 1:
            coordinate_type = int(next_line)
        else:
            # No explicit geometry line; assume cartesian.
            coordinate_type = 0
        if coordinate_type == 0:
            self.geometry = "cartesian"
        elif coordinate_type == 1:
            self.geometry = "cylindrical"
        elif coordinate_type == 2:
            self.geometry = "spherical"
        else:
            raise RuntimeError("Unknown BoxLib coord_type")
        # overrides for 1/2-dimensional data
        if self.dimensionality == 1:
            self._setup1d()
        elif self.dimensionality == 2:
            self._setup2d()
def _set_code_unit_attributes(self):
setdefaultattr(self, "length_unit", self.quan(1.0, "cm"))
setdefaultattr(self, "mass_unit", self.quan(1.0, "g"))
setdefaultattr(self, "time_unit", self.quan(1.0, "s"))
setdefaultattr(self, "velocity_unit", self.quan(1.0, "cm/s"))
def _setup1d(self):
# self._index_class = BoxlibHierarchy1D
# self._fieldinfo_fallback = Orion1DFieldInfo
self.domain_left_edge = np.concatenate([self.domain_left_edge, [0.0, 0.0]])
self.domain_right_edge = np.concatenate([self.domain_right_edge, [1.0, 1.0]])
tmp = self.domain_dimensions.tolist()
tmp.extend((1, 1))
self.domain_dimensions = np.array(tmp)
tmp = list(self.periodicity)
tmp[1] = False
tmp[2] = False
self.periodicity = ensure_tuple(tmp)
def _setup2d(self):
self.domain_left_edge = np.concatenate([self.domain_left_edge, [0.0]])
self.domain_right_edge = np.concatenate([self.domain_right_edge, [1.0]])
if self.geometry == "cylindrical":
dre = self.domain_right_edge
dre[2] = 2.0 * np.pi
self.domain_right_edge = dre
tmp = self.domain_dimensions.tolist()
tmp.append(1)
self.domain_dimensions = np.array(tmp)
tmp = list(self.periodicity)
tmp[2] = False
self.periodicity = ensure_tuple(tmp)
@parallel_root_only
def print_key_parameters(self):
for a in [
"current_time",
"domain_dimensions",
"domain_left_edge",
"domain_right_edge",
]:
if not hasattr(self, a):
mylog.error("Missing %s in parameter file definition!", a)
continue
v = getattr(self, a)
mylog.info("Parameters: %-25s = %s", a, v)
def relative_refinement(self, l0, l1):
offset = self.level_offsets[l1] - self.level_offsets[l0]
return self.refine_by ** (l1 - l0 + offset)
class OrionHierarchy(BoxlibHierarchy):
    """Boxlib hierarchy that additionally reads Orion star/sink particles."""
    def __init__(self, ds, dataset_type="orion_native"):
        BoxlibHierarchy.__init__(self, ds, dataset_type)
        self._read_particles()
        # self.io = IOHandlerOrion
    def _detect_output_fields(self):
        """Build the field list from the header and look for particle files."""
        # This is all done in _parse_header_file
        self.field_list = [("boxlib", f) for f in self.dataset._field_list]
        self.field_indexes = dict((f[1], i) for i, f in enumerate(self.field_list))
        # There are times when field_list may change. We copy it here to
        # avoid that possibility.
        self.field_order = [f for f in self.field_list]
        # look for particle fields
        # NOTE(review): if both files exist, the last one found
        # (SinkParticles) wins — confirm that precedence is intended.
        self.particle_filename = None
        for particle_filename in ["StarParticles", "SinkParticles"]:
            fn = os.path.join(self.ds.output_dir, particle_filename)
            if os.path.exists(fn):
                self.particle_filename = fn
        if self.particle_filename is None:
            return
        pfield_list = [("io", c) for c in self.io.particle_field_index.keys()]
        self.field_list.extend(pfield_list)
    def _read_particles(self):
        """
        Reads in particles and assigns them to grids. Will search for
        Star particles, then sink particles if no star particle file
        is found, and finally will simply note that no particles are
        found if neither works. To add a new Orion particle type,
        simply add it to the if/elif/else block.
        """
        self.grid_particle_count = np.zeros(len(self.grids))
        if self.particle_filename is not None:
            self._read_particle_file(self.particle_filename)
    def _read_particle_file(self, fn):
        """actually reads the orion particle data file itself.

        Updates ``grid_particle_count``, each grid's ``NumberOfParticles``
        and records per-grid line numbers for fast later access.
        """
        if not os.path.exists(fn):
            return
        with open(fn, "r") as f:
            lines = f.readlines()
            # NOTE(review): only the first character of the first line is
            # parsed — presumably the star count is a single digit; confirm
            # behavior for 10 or more particles.
            self.num_stars = int(lines[0].strip()[0])
            for num, line in enumerate(lines[1:]):
                # Columns 1-3 are the particle position.
                particle_position_x = float(line.split(" ")[1])
                particle_position_y = float(line.split(" ")[2])
                particle_position_z = float(line.split(" ")[3])
                coord = [particle_position_x, particle_position_y, particle_position_z]
                # for each particle, determine which grids contain it
                # copied from object_finding_mixin.py
                mask = np.ones(self.num_grids)
                for i in range(len(coord)):
                    # Zero out grids whose left edge is beyond the particle
                    # or whose right edge is before it.
                    np.choose(
                        np.greater(self.grid_left_edge.d[:, i], coord[i]),
                        (mask, 0),
                        mask,
                    )
                    np.choose(
                        np.greater(self.grid_right_edge.d[:, i], coord[i]),
                        (0, mask),
                        mask,
                    )
                ind = np.where(mask == 1)
                selected_grids = self.grids[ind]
                # in orion, particles always live on the finest level.
                # so, we want to assign the particle to the finest of
                # the grids we just found
                if len(selected_grids) != 0:
                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
                    ind = np.where(self.grids == grid)[0][0]
                    self.grid_particle_count[ind] += 1
                    self.grids[ind].NumberOfParticles += 1
                    # store the position in the particle file for fast access.
                    try:
                        self.grids[ind]._particle_line_numbers.append(num + 1)
                    except AttributeError:
                        self.grids[ind]._particle_line_numbers = [num + 1]
        return True
class OrionDataset(BoxlibDataset):
    """Dataset for Orion plotfiles (detected via the "hyp." keyword)."""

    _index_class = OrionHierarchy
    _subtype_keyword = "hyp."

    def __init__(
        self,
        output_dir,
        cparam_filename="inputs",
        fparam_filename="probin",
        dataset_type="orion_native",
        storage_filename=None,
        units_override=None,
        unit_system="cgs",
    ):
        # NOTE: storage_filename is accepted for API symmetry but, as in the
        # original implementation, is not forwarded to the base class.
        super(OrionDataset, self).__init__(
            output_dir,
            cparam_filename,
            fparam_filename,
            dataset_type,
            units_override=units_override,
            unit_system=unit_system,
        )

    @classmethod
    def _is_valid(cls, *args, **kwargs):
        """Defer to the keyword-based subtype detection."""
        return cls._is_valid_subtype(*args, **kwargs)
class CastroHierarchy(BoxlibHierarchy):
    """Boxlib hierarchy that also reads Castro tracer particles."""

    def __init__(self, ds, dataset_type="castro_native"):
        super(CastroHierarchy, self).__init__(ds, dataset_type)

        if "particles" not in self.ds.parameters:
            return
        # Real particle fields beyond the xyz positions common to all Boxlib
        # particle containers: one velocity component per dimension.
        velocity_fields = [
            "particle_velocity_x",
            "particle_velocity_y",
            "particle_velocity_z",
        ][: self.ds.dimensionality]
        # Castro tracer particles are stored in checkpoint format.
        self._read_particles("Tracer", True, velocity_fields)
class CastroDataset(BoxlibDataset):
    """Dataset for Castro plotfiles (detected via the "castro" keyword)."""
    _index_class = CastroHierarchy
    _field_info_class = CastroFieldInfo
    _subtype_keyword = "castro"
    def __init__(
        self,
        output_dir,
        cparam_filename="job_info",
        fparam_filename=None,
        dataset_type="boxlib_native",
        storage_filename=None,
        units_override=None,
        unit_system="cgs",
    ):
        super(CastroDataset, self).__init__(
            output_dir,
            cparam_filename,
            fparam_filename,
            dataset_type,
            storage_filename,
            units_override,
            unit_system,
        )
    @classmethod
    def _is_valid(cls, *args, **kwargs):
        # Defer to the keyword-based subtype detection.
        return cls._is_valid_subtype(*args, **kwargs)
    def _parse_parameter_file(self):
        """Parse the Castro job_info file.

        Collects boundary conditions and git hashes from the preamble, then
        the overridden runtime parameters after "Inputs File Parameters";
        finally derives periodicity and particle availability.
        """
        super(CastroDataset, self)._parse_parameter_file()
        jobinfo_filename = os.path.join(self.output_dir, self.cparam_filename)
        line = ""
        with open(jobinfo_filename, "r") as f:
            while not line.startswith(" Inputs File Parameters"):
                # boundary condition info starts with -x:, etc.
                bcs = ["-x:", "+x:", "-y:", "+y:", "-z:", "+z:"]
                if any(b in line for b in bcs):
                    p, v = line.strip().split(":")
                    self.parameters[p] = v.strip()
                if "git describe" in line or "git hash" in line:
                    # Castro release 17.02 and later
                    # line format: codename git describe: the-hash
                    # Castro before release 17.02
                    # line format: codename git hash: the-hash
                    fields = line.split(":")
                    self.parameters[fields[0]] = fields[1].strip()
                line = next(f)
            # runtime parameters that we overrode follow "Inputs File
            # Parameters"
            # skip the "====..." line
            line = next(f)
            for line in f:
                if line.strip() == "" or "fortin parameters" in line:
                    continue
                p, v = line.strip().split("=")
                self.parameters[p] = v.strip()
        # hydro method is set by the base class -- override it here
        self.parameters["HydroMethod"] = "Castro"
        # set the periodicity based on the runtime parameters
        # a direction is periodic exactly when its low boundary is "interior"
        periodicity = [True, True, True]
        if not self.parameters["-x"] == "interior":
            periodicity[0] = False
        if self.dimensionality >= 2:
            if not self.parameters["-y"] == "interior":
                periodicity[1] = False
        if self.dimensionality == 3:
            if not self.parameters["-z"] == "interior":
                periodicity[2] = False
        self.periodicity = ensure_tuple(periodicity)
        if os.path.isdir(os.path.join(self.output_dir, "Tracer")):
            # we have particles
            self.parameters["particles"] = 1
            self.particle_types = ("Tracer",)
            self.particle_types_raw = self.particle_types
class MaestroDataset(BoxlibDataset):
    """Dataset for Maestro plotfiles (detected via the "maestro" keyword)."""
    _field_info_class = MaestroFieldInfo
    _subtype_keyword = "maestro"
    def __init__(
        self,
        output_dir,
        cparam_filename="job_info",
        fparam_filename=None,
        dataset_type="boxlib_native",
        storage_filename=None,
        units_override=None,
        unit_system="cgs",
    ):
        super(MaestroDataset, self).__init__(
            output_dir,
            cparam_filename,
            fparam_filename,
            dataset_type,
            storage_filename,
            units_override,
            unit_system,
        )
    @classmethod
    def _is_valid(cls, *args, **kwargs):
        # Defer to the keyword-based subtype detection.
        return cls._is_valid_subtype(*args, **kwargs)
    def _parse_parameter_file(self):
        """Parse the Maestro job_info file for git hashes and runtime
        parameters, then derive periodicity from the bc*_lo values."""
        super(MaestroDataset, self)._parse_parameter_file()
        jobinfo_filename = os.path.join(self.output_dir, self.cparam_filename)
        with open(jobinfo_filename, "r") as f:
            for line in f:
                # get the code git hashes
                if "git hash" in line:
                    # line format: codename git hash: the-hash
                    fields = line.split(":")
                    self.parameters[fields[0]] = fields[1].strip()
        with open(jobinfo_filename, "r") as f:
            # get the runtime parameters
            for line in f:
                try:
                    # parameter lines are indented by 4 spaces; strip that
                    # prefix before splitting on the first "=" only
                    p, v = (_.strip() for _ in line[4:].split("=", 1))
                    if len(v) == 0:
                        self.parameters[p] = ""
                    else:
                        self.parameters[p] = _guess_pcast(v)
                except ValueError:
                    # not a parameter line
                    pass
        # hydro method is set by the base class -- override it here
        self.parameters["HydroMethod"] = "Maestro"
        # set the periodicity based on the integer BC runtime parameters
        # (bc code -1 means periodic; missing keys leave the default True)
        periodicity = [True, True, True]
        for i, ax in enumerate("xyz"):
            try:
                periodicity[i] = self.parameters[f"bc{ax}_lo"] != -1
            except KeyError:
                pass
        self.periodicity = ensure_tuple(periodicity)
class NyxHierarchy(BoxlibHierarchy):
    """Boxlib hierarchy that also reads Nyx dark-matter ("DM") particles."""

    def __init__(self, ds, dataset_type="nyx_native"):
        super(NyxHierarchy, self).__init__(ds, dataset_type)

        if "particles" not in self.ds.parameters:
            return
        # Real fields beyond the xyz positions shared by all Boxlib
        # particles: mass plus one velocity component per dimension.
        extra_fields = [
            "particle_mass",
            "particle_velocity_x",
            "particle_velocity_y",
            "particle_velocity_z",
        ][: self.ds.dimensionality + 1]
        # Nyx particle data is not stored in checkpoint format.
        self._read_particles("DM", False, extra_fields)
class NyxDataset(BoxlibDataset):
    """Dataset for Nyx plotfiles (detected via the "nyx" keyword)."""
    _index_class = NyxHierarchy
    _field_info_class = NyxFieldInfo
    _subtype_keyword = "nyx"
    def __init__(
        self,
        output_dir,
        cparam_filename="job_info",
        fparam_filename=None,
        dataset_type="boxlib_native",
        storage_filename=None,
        units_override=None,
        unit_system="cgs",
    ):
        super(NyxDataset, self).__init__(
            output_dir,
            cparam_filename,
            fparam_filename,
            dataset_type,
            storage_filename,
            units_override,
            unit_system,
        )
    @classmethod
    def _is_valid(cls, *args, **kwargs):
        # Defer to the keyword-based subtype detection.
        return cls._is_valid_subtype(*args, **kwargs)
    def _parse_parameter_file(self):
        """Parse the Nyx job_info file, cosmology parameters, the
        ``comoving_a`` scale-factor file, and detect DM particles."""
        super(NyxDataset, self)._parse_parameter_file()
        jobinfo_filename = os.path.join(self.output_dir, self.cparam_filename)
        with open(jobinfo_filename, "r") as f:
            for line in f:
                # get the code git hashes
                if "git hash" in line:
                    # line format: codename git hash: the-hash
                    fields = line.split(":")
                    self.parameters[fields[0]] = fields[1].strip()
                if line.startswith(" Cosmology Information"):
                    self.cosmological_simulation = 1
                    break
            else:
                # for/else: no "Cosmology Information" section was found
                self.cosmological_simulation = 0
            if self.cosmological_simulation:
                # note that modern Nyx is always cosmological, but there are some old
                # files without these parameters so we want to special-case them
                # (continues reading from just after the section header)
                for line in f:
                    if "Omega_m (comoving)" in line:
                        self.omega_matter = float(line.split(":")[1])
                    elif "Omega_lambda (comoving)" in line:
                        self.omega_lambda = float(line.split(":")[1])
                    elif "h (comoving)" in line:
                        self.hubble_constant = float(line.split(":")[1])
        # Read in the `comoving_a` file and parse the value. We should fix this
        # in the new Nyx output format...
        a_file = open(os.path.join(self.output_dir, "comoving_a"))
        a_string = a_file.readline().strip()
        a_file.close()
        # Set the scale factor and redshift
        self.cosmological_scale_factor = float(a_string)
        self.parameters["CosmologyCurrentRedshift"] = 1 / float(a_string) - 1
        # alias
        self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
        if os.path.isfile(os.path.join(self.output_dir, "DM/Header")):
            # we have particles
            self.parameters["particles"] = 1
            self.particle_types = ("DM",)
            self.particle_types_raw = self.particle_types
    def _set_code_unit_attributes(self):
        """Set Nyx's comoving code units (only where not already set)."""
        setdefaultattr(self, "mass_unit", self.quan(1.0, "Msun"))
        # 3.08568025e19 cm = 1 Mpc / 1e5, i.e. time unit = Mpc / (km/s)
        setdefaultattr(self, "time_unit", self.quan(1.0 / 3.08568025e19, "s"))
        # comoving length unit: 1 Mpc scaled by the current scale factor
        setdefaultattr(
            self, "length_unit", self.quan(1.0 / (1 + self.current_redshift), "Mpc")
        )
        setdefaultattr(self, "velocity_unit", self.length_unit / self.time_unit)
def _guess_pcast(vals):
# Now we guess some things about the parameter and its type
# Just in case there are multiple; we'll go
# back afterward to using vals.
v = vals.split()[0]
try:
float(v.upper().replace("D", "E"))
except Exception:
pcast = str
if v in ("F", "T"):
pcast = bool
else:
syms = (".", "D+", "D-", "E+", "E-", "E", "D")
if any(sym in v.upper() for sym in syms for v in vals.split()):
pcast = float
else:
pcast = int
if pcast == bool:
vals = [value == "T" for value in vals.split()]
else:
vals = [pcast(value) for value in vals.split()]
if len(vals) == 1:
vals = vals[0]
return vals
def _read_raw_field_names(raw_file):
header_files = glob.glob(raw_file + "*_H")
return [hf.split(os.sep)[-1][:-2] for hf in header_files]
def _string_to_numpy_array(s):
return np.array([int(v) for v in s[1:-1].split(",")], dtype=np.int64)
def _line_to_numpy_arrays(line):
    """Decode one pre-split box line into (lo_corner, hi_corner, node_type).

    The three tokens carry stray parentheses from the enclosing box syntax,
    which are trimmed before parsing each into an int64 array.
    """
    tokens = (line[0][1:], line[1], line[2][:-1])
    lo_corner, hi_corner, node_type = (_string_to_numpy_array(t) for t in tokens)
    return lo_corner, hi_corner, node_type
def _get_active_dimensions(box):
return box[1] - box[2] - box[0] + 1
def _read_header(raw_file, field):
    """Read the per-level Boxlib ``<field>_H`` headers under *raw_file*.

    Returns ``(nghost, all_boxes, all_file_names, all_offsets)`` aggregated
    over every ``Level_*`` directory.
    """
    level_files = glob.glob(raw_file + "Level_*")
    level_files.sort()
    all_boxes = []
    all_file_names = []
    all_offsets = []
    for level_file in level_files:
        header_file = level_file + "/" + field + "_H"
        with open(header_file, "r") as f:
            f.readline()  # version
            f.readline()  # how
            f.readline()  # ncomp
            # Ghost-zone count: either a single integer (applied to all
            # three directions) or a "(n,n,n)"-style tuple.
            nghost_lines = f.readline().strip().split()
            try:
                ng = int(nghost_lines[0])
                nghost = np.array([ng, ng, ng])
            except ValueError:
                nghosts = nghost_lines[0][1:-1].split(",")
                nghost = np.array([int(ng) for ng in nghosts])
            f.readline()  # num boxes
            # read boxes (terminated by a line holding only ")")
            boxes = []
            for line in f:
                clean_line = line.strip().split()
                if clean_line == [")"]:
                    break
                lo_corner, hi_corner, node_type = _line_to_numpy_arrays(clean_line)
                boxes.append((lo_corner, hi_corner, node_type))
            # read the file and offset position for the corresponding box
            file_names = []
            offsets = []
            for line in f:
                if line.startswith("FabOnDisk:"):
                    clean_line = line.strip().split()
                    file_names.append(clean_line[1])
                    offsets.append(int(clean_line[2]))
        all_boxes += boxes
        all_file_names += file_names
        all_offsets += offsets
    # NOTE(review): nghost is whatever the *last* level declared — assumes
    # every level shares the same ghost-zone count; confirm for mixed output.
    return nghost, all_boxes, all_file_names, all_offsets
class WarpXHeader:
    """Parser for a WarpX checkpoint ``Header`` file.

    All parsed values are stored in the ``data`` dict; trailing multi-token
    lines are exposed as ``species_<i>`` entries.
    """
    def __init__(self, header_fn):
        self.data = {}
        with open(header_fn, "r") as f:
            self.data["Checkpoint_version"] = int(f.readline().strip().split()[-1])
            self.data["num_levels"] = int(f.readline().strip().split()[-1])
            self.data["istep"] = [int(num) for num in f.readline().strip().split()]
            self.data["nsubsteps"] = [int(num) for num in f.readline().strip().split()]
            self.data["t_new"] = [float(num) for num in f.readline().strip().split()]
            self.data["t_old"] = [float(num) for num in f.readline().strip().split()]
            self.data["dt"] = [float(num) for num in f.readline().strip().split()]
            self.data["moving_window_x"] = float(f.readline().strip().split()[-1])
            # not all datasets will have is_synchronized
            line = f.readline().strip().split()
            if len(line) == 1:
                # NOTE(review): bool() of a non-empty string is always True,
                # so even a "0" token would read as synchronized — confirm
                # this is intended for the checkpoint format.
                self.data["is_synchronized"] = bool(line[-1])
                self.data["prob_lo"] = [
                    float(num) for num in f.readline().strip().split()
                ]
            else:
                # The line we just read was already prob_lo.
                self.data["is_synchronized"] = True
                self.data["prob_lo"] = [float(num) for num in line]
            self.data["prob_hi"] = [float(num) for num in f.readline().strip().split()]
            # Skip the per-level box lists: a "(<num_boxes> ..." header line,
            # then num_boxes box lines plus one closing line each.
            for _ in range(self.data["num_levels"]):
                num_boxes = int(f.readline().strip().split()[0][1:])
                for __ in range(num_boxes):
                    f.readline()
                f.readline()
            # Remaining multi-token lines are per-species parameter rows;
            # single-token lines between them are skipped.
            i = 0
            line = f.readline()
            while line:
                line = line.strip().split()
                if len(line) == 1:
                    line = f.readline()
                    continue
                self.data["species_%d" % i] = [float(val) for val in line]
                i = i + 1
                line = f.readline()
class WarpXHierarchy(BoxlibHierarchy):
    """Boxlib hierarchy with WarpX particle species and raw (nodal) fields."""
    def __init__(self, ds, dataset_type="boxlib_native"):
        super(WarpXHierarchy, self).__init__(ds, dataset_type)
        is_checkpoint = True
        for ptype in self.ds.particle_types:
            self._read_particles(ptype, is_checkpoint)
        # Additional WarpX particle information (used to set up species)
        self.warpx_header = WarpXHeader(self.ds.output_dir + "/WarpXHeader")
        for key, val in self.warpx_header.data.items():
            if key.startswith("species_"):
                i = int(key.split("_")[-1])
                # First two entries of each species row become the
                # particle<i>_charge / particle<i>_mass parameters.
                charge_name = "particle%.1d_charge" % i
                mass_name = "particle%.1d_mass" % i
                self.parameters[charge_name] = val[0]
                self.parameters[mass_name] = val[1]
    def _detect_output_fields(self):
        """Detect cell-centered fields, then append the raw nodal fields."""
        super(WarpXHierarchy, self)._detect_output_fields()
        # now detect the optional, non-cell-centered fields
        self.raw_file = self.ds.output_dir + "/raw_fields/"
        self.raw_fields = _read_raw_field_names(self.raw_file + "Level_0/")
        self.field_list += [("raw", f) for f in self.raw_fields]
        self.raw_field_map = {}
        self.ds.nodal_flags = {}
        self.raw_field_nghost = {}
        for field_name in self.raw_fields:
            nghost, boxes, file_names, offsets = _read_header(self.raw_file, field_name)
            self.raw_field_map[field_name] = (boxes, file_names, offsets)
            self.raw_field_nghost[field_name] = nghost
            # The node_type of the first box gives the field's nodal flags.
            self.ds.nodal_flags[field_name] = np.array(boxes[0][2])
def _skip_line(line):
if len(line) == 0:
return True
if line[0] == "\n":
return True
if line[0] == "=":
return True
if line[0] == " ":
return True
class WarpXDataset(BoxlibDataset):
    """Dataset for WarpX plotfiles (detected via the "warpx" keyword)."""
    _index_class = WarpXHierarchy
    _field_info_class = WarpXFieldInfo
    _subtype_keyword = "warpx"
    def __init__(
        self,
        output_dir,
        cparam_filename="warpx_job_info",
        fparam_filename=None,
        dataset_type="boxlib_native",
        storage_filename=None,
        units_override=None,
        unit_system="mks",
    ):
        # WarpX exposes cell-centered data under the "mesh" fluid type and
        # non-cell-centered data under "raw"; set these before base init.
        self.default_fluid_type = "mesh"
        self.default_field = ("mesh", "density")
        self.fluid_types = ("mesh", "index", "raw")
        super(WarpXDataset, self).__init__(
            output_dir,
            cparam_filename,
            fparam_filename,
            dataset_type,
            storage_filename,
            units_override,
            unit_system,
        )
    @classmethod
    def _is_valid(cls, *args, **kwargs):
        # Defer to the keyword-based subtype detection.
        return cls._is_valid_subtype(*args, **kwargs)
    def _parse_parameter_file(self):
        """Parse the warpx_job_info file ("key: value" and "key = value"
        lines), derive periodicity, and detect particle containers."""
        super(WarpXDataset, self)._parse_parameter_file()
        jobinfo_filename = os.path.join(self.output_dir, self.cparam_filename)
        with open(jobinfo_filename, "r") as f:
            for line in f.readlines():
                if _skip_line(line):
                    continue
                l = line.strip().split(":")
                if len(l) == 2:
                    self.parameters[l[0].strip()] = l[1].strip()
                l = line.strip().split("=")
                if len(l) == 2:
                    self.parameters[l[0].strip()] = l[1].strip()
        # set the periodicity based on the integer BC runtime parameters
        # NOTE(review): bool("0") is True, so every listed direction reads as
        # periodic regardless of the flag value — confirm intended.
        is_periodic = self.parameters["geometry.is_periodic"].split()
        periodicity = [bool(val) for val in is_periodic]
        for _ in range(self.dimensionality, 3):
            periodicity += [True]  # pad to 3D
        self.periodicity = ensure_tuple(periodicity)
        # Any subdirectory carrying a Header file is a particle container.
        particle_types = glob.glob(self.output_dir + "/*/Header")
        particle_types = [cpt.split(os.sep)[-2] for cpt in particle_types]
        if len(particle_types) > 0:
            self.parameters["particles"] = 1
            self.particle_types = tuple(particle_types)
            self.particle_types_raw = self.particle_types
        else:
            self.particle_types = ()
            self.particle_types_raw = ()
    def _set_code_unit_attributes(self):
        """Set SI code units (only where not already set)."""
        setdefaultattr(self, "length_unit", self.quan(1.0, "m"))
        setdefaultattr(self, "mass_unit", self.quan(1.0, "kg"))
        setdefaultattr(self, "time_unit", self.quan(1.0, "s"))
        setdefaultattr(self, "velocity_unit", self.quan(1.0, "m/s"))
        setdefaultattr(self, "magnetic_unit", self.quan(1.0, "T"))
class AMReXHierarchy(BoxlibHierarchy):
    """Boxlib hierarchy that reads particles for every detected type."""

    def __init__(self, ds, dataset_type="boxlib_native"):
        super(AMReXHierarchy, self).__init__(ds, dataset_type)

        if "particles" not in self.ds.parameters:
            return
        # AMReX particle data is stored in checkpoint format.
        for ptype in self.ds.particle_types:
            self._read_particles(ptype, True)
class AMReXDataset(BoxlibDataset):
    """Generic AMReX dataset; never auto-detected, must be requested explicitly."""

    _index_class = AMReXHierarchy

    def __init__(
        self,
        output_dir,
        cparam_filename="job_info",
        fparam_filename=None,
        dataset_type="boxlib_native",
        storage_filename=None,
        units_override=None,
        unit_system="cgs",
    ):
        super(AMReXDataset, self).__init__(
            output_dir,
            cparam_filename,
            fparam_filename,
            dataset_type,
            storage_filename,
            units_override,
            unit_system,
        )

    def _parse_parameter_file(self):
        """Parse base parameters, then detect particle types from ``*/Header``."""
        super(AMReXDataset, self)._parse_parameter_file()
        # Any subdirectory carrying a Header file is a particle container.
        header_paths = glob.glob(self.output_dir + "/*/Header")
        ptypes = [path.split(os.sep)[-2] for path in header_paths]
        if ptypes:
            self.parameters["particles"] = 1
            self.particle_types = tuple(ptypes)
            self.particle_types_raw = self.particle_types

    @classmethod
    def _is_valid(cls, *args, **kwargs):
        # Never auto-detected: AMReXDataset must be instantiated directly.
        return False
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: wangye
@file: lesson1.py
@time: 2020/08/25
@contact: wangye.hope@gmail.com
@site:
@software: PyCharm

Pygame lesson: show a background image and draw a cursor image centered
on the mouse position.
"""
import sys

import pygame
from pygame.locals import *

if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((640, 480), RESIZABLE, 32)
    pygame.display.set_caption('Hello World!')

    # convert()/convert_alpha() match the images to the display format.
    background = pygame.image.load('sushiplate.jpg').convert()
    mouse_cursor = pygame.image.load('fugu.png').convert_alpha()

    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                # exit() is the site builtin meant for interactive use;
                # shut pygame down cleanly and raise SystemExit instead.
                pygame.quit()
                sys.exit()
        screen.blit(background, (0, 0))
        # Center the cursor image on the mouse position; integer division
        # keeps blit coordinates integral.
        x, y = pygame.mouse.get_pos()
        x -= mouse_cursor.get_width() // 2
        y -= mouse_cursor.get_height() // 2
        screen.blit(mouse_cursor, (x, y))
        pygame.display.update()
|
# -*- coding: utf-8 -*-
import time
from .. import base
from girder.exceptions import ValidationException
from girder.models.notification import ProgressState
from girder.models.setting import Setting
from girder.models.token import Token
from girder.models.user import User
from girder.settings import SettingKey
from girder.utility.progress import ProgressContext
def setUpModule():
    # Boot the embedded Girder test server once for every test in the module.
    base.startServer()
def tearDownModule():
    # Shut the shared Girder test server down after the last test runs.
    base.stopServer()
class NotificationTestCase(base.TestCase):
    """Exercise the SSE notification stream endpoint with both a logged-in
    user and an anonymous session token, covering auth, the enable/disable
    setting, progress rate limiting, and context-manager flushing."""
    def setUp(self):
        base.TestCase.setUp(self)
        self.admin = User().createUser(
            email='admin@girder.test', login='admin', firstName='first',
            lastName='last', password='mypasswd')
    def _testStream(self, user, token=None):
        """Run the full stream scenario as either *user* or *token*."""
        # Should only work for users or token sessions
        resp = self.request(path='/notification/stream', method='GET')
        self.assertStatus(resp, 401)
        self.assertEqual(
            resp.json['message'],
            'You must be logged in or have a valid auth token.')
        resp = self.request(path='/notification/stream', method='GET',
                            user=user, token=token, isJson=False,
                            params={'timeout': 0})
        self.assertStatusOk(resp)
        self.assertEqual(self.getBody(resp), '')
        # Should not work when disabled
        Setting().set(SettingKey.ENABLE_NOTIFICATION_STREAM, False)
        resp = self.request(path='/notification/stream', method='GET',
                            user=user, token=token, isJson=False,
                            params={'timeout': 0})
        self.assertStatus(resp, 503)
        Setting().set(SettingKey.ENABLE_NOTIFICATION_STREAM, True)
        # Use a very high rate-limit interval so that we don't fail on slow
        # build boxes
        with ProgressContext(
                True, user=user, token=token, title='Test', total=100,
                interval=100) as progress:
            progress.update(current=1)
            # Rate limiting should make it so we didn't write the immediate
            # update within the time interval.
            resp = self.request(path='/notification/stream', method='GET',
                                user=user, token=token, isJson=False,
                                params={'timeout': 0})
            messages = self.getSseMessages(resp)
            self.assertEqual(len(messages), 1)
            self.assertEqual(messages[0]['type'], 'progress')
            self.assertEqual(messages[0]['data']['total'], 100)
            self.assertEqual(messages[0]['data']['current'], 0)
            self.assertFalse(ProgressState.isComplete(
                messages[0]['data']['state']))
            # Now use a very short interval to test that we do save changes
            progress.interval = 0.01
            time.sleep(0.02)
            progress.update(current=2)
            resp = self.request(path='/notification/stream', method='GET',
                                user=user, token=token, isJson=False,
                                params={'timeout': 0})
            messages = self.getSseMessages(resp)
            self.assertEqual(len(messages), 1)
            self.assertEqual(messages[0]['data']['current'], 2)
            # If we use a non-numeric value, nothing bad should happen
            time.sleep(0.02)
            progress.update(current='not_a_number')
            resp = self.request(path='/notification/stream', method='GET',
                                user=user, token=token, isJson=False,
                                params={'timeout': 0})
            messages = self.getSseMessages(resp)
            self.assertEqual(len(messages), 1)
            self.assertEqual(messages[0]['data']['current'], 'not_a_number')
            # Updating the progress without saving and then exiting should
            # send the update.
            progress.interval = 1000
            progress.update(current=3)
            # The message should contain a timestamp
            self.assertIn('_girderTime', messages[0])
            self.assertIsInstance(messages[0]['_girderTime'], int)
            # Test that the "since" parameter correctly filters out messages
            since = messages[0]['_girderTime'] + 1
            resp = self.request(path='/notification/stream', method='GET',
                                user=user, token=token, isJson=False,
                                params={'timeout': 0, 'since': since})
            messages = self.getSseMessages(resp)
            self.assertEqual(len(messages), 0)
        # Exiting the context manager should flush the most recent update.
        resp = self.request(path='/notification/stream', method='GET',
                            user=user, token=token, isJson=False,
                            params={'timeout': 0})
        messages = self.getSseMessages(resp)
        self.assertEqual(len(messages), 1)
        self.assertEqual(messages[0]['data']['current'], 3)
        # Test a ValidationException within the progress context
        try:
            with ProgressContext(True, user=user, token=token, title='Test', total=100):
                raise ValidationException('Test Message')
        except ValidationException:
            pass
        # Exiting the context manager should flush the most recent update.
        resp = self.request(path='/notification/stream', method='GET',
                            user=user, token=token, isJson=False,
                            params={'timeout': 0})
        messages = self.getSseMessages(resp)
        self.assertEqual(messages[-1]['data']['message'],
                         'Error: Test Message')
    def testUserStream(self):
        """Stream scenario authenticated as a real user."""
        self._testStream(self.admin)
    def testTokenStream(self):
        """Stream scenario authenticated with an anonymous session token."""
        resp = self.request(path='/token/session', method='GET')
        self.assertStatusOk(resp)
        token = resp.json['token']
        tokenDoc = Token().load(token, force=True, objectId=False)
        self._testStream(None, tokenDoc)
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
The metrics module contains implementations of various metrics used commonly to
understand how well our models are performing, e.g. accuracy, vqa_accuracy,
r@1, etc.
For implementing your own metric, you need to follow these steps:
1. Create your own metric class and inherit ``BaseMetric`` class.
2. In the ``__init__`` function of your class, make sure to call
``super().__init__('name')`` where 'name' is the name of your metric. If
you require any parameters in your ``__init__`` function, you can use
keyword arguments to represent them and metric constructor will take care of
providing them to your class from config.
3. Implement a ``calculate`` function which takes in ``SampleList`` and
`model_output` as input and return back a float tensor/number.
4. Register your metric with a key 'name' by using decorator,
``@registry.register_metric('name')``.
Example::
import torch
from pythia.common.registry import registry
from pythia.modules.metrics import BaseMetric
@registry.register_metric("some")
class SomeMetric(BaseMetric):
def __init__(self, some_param=None):
super().__init__("some")
....
def calculate(self, sample_list, model_output):
metric = torch.tensor(2, dtype=torch.float)
return metric
Example config for above metric::
model_attributes:
pythia:
metrics:
- type: some
params:
some_param: a
"""
import collections
import torch
from pythia.common.registry import registry
class Metrics:
    """Internally used by Pythia, Metrics acts as a wrapper for handling
    calculation of metrics over the various metrics specified by the model in
    the config. It initializes all of the metrics and, when called, runs
    calculate on each of them one by one and returns back a dict with proper
    naming. For example, a dict returned by the Metrics class:
    ``{'val/vqa_accuracy': 0.3, 'val/r@1': 0.8}``

    Args:
        metric_list (List[ConfigNode]): List of ConfigNodes where each ConfigNode
                                        specifies name and parameters of the
                                        metrics used.
    """

    def __init__(self, metric_list):
        # BUG FIX: the original assigned the wrapped list to a misspelled
        # name ("metrics_list"), so a single non-list metric was never
        # actually converted and _init_metrics would iterate over e.g. the
        # characters of a metric-name string.
        if not isinstance(metric_list, list):
            metric_list = [metric_list]

        self.writer = registry.get("writer")
        self.metrics = self._init_metrics(metric_list)

    def _init_metrics(self, metric_list):
        """Instantiate each configured metric class from the registry.

        Entries may be plain strings (metric names) or mappings carrying a
        ``type`` attribute and optional ``params``.

        Raises:
            ValueError: if a mapping lacks ``type`` or the name is unknown.
            TypeError: if an entry is neither a mapping nor a string.
        """
        metrics = {}
        for metric in metric_list:
            params = {}
            if isinstance(metric, collections.abc.Mapping):
                if not hasattr(metric, "type"):
                    raise ValueError(
                        "Metric {} needs to have 'type' attribute".format(metric)
                    )
                # BUG FIX: read params *before* overwriting `metric` with its
                # name — the original fetched "params" off the name string,
                # so configured params were always silently dropped.
                params = getattr(metric, "params", {})
                metric = metric.type
            else:
                if not isinstance(metric, str):
                    raise TypeError(
                        "Metric {} has inappropriate type"
                        "'dict' or 'str' allowed".format(metric)
                    )

            metric_cls = registry.get_metric_class(metric)
            if metric_cls is None:
                raise ValueError(
                    "No metric named {} registered to registry".format(metric)
                )
            metrics[metric] = metric_cls(**params)

        return metrics

    def __call__(self, sample_list, model_output, *args, **kwargs):
        """Compute every metric on (sample_list, model_output).

        Returns a dict keyed ``"<dataset_type>/<metric_name>"``; each value
        is coerced to a 1-D float tensor. Results are also pushed into the
        registry under ``metrics.<dataset_name>.<dataset_type>``.
        """
        values = {}
        # Without targets there is nothing to score against.
        if not hasattr(sample_list, "targets"):
            return values

        dataset_type = sample_list.dataset_type

        with torch.no_grad():
            for metric_name, metric_object in self.metrics.items():
                key = "{}/{}".format(dataset_type, metric_name)
                values[key] = metric_object._calculate_with_checks(
                    sample_list, model_output, *args, **kwargs
                )

                if not isinstance(values[key], torch.Tensor):
                    values[key] = torch.tensor(values[key], dtype=torch.float)

                # Promote 0-d scalars to 1-d tensors for uniform handling.
                if values[key].dim() == 0:
                    values[key] = values[key].view(1)

        registry.register(
            "{}.{}.{}".format("metrics", sample_list.dataset_name, dataset_type), values
        )

        return values
class BaseMetric:
    """Base class for all metrics registered to Pythia.

    Subclasses must override :meth:`calculate`; see the module docstring for
    registration details.

    Args:
        name (str): Name of the metric.
    """

    def __init__(self, name, *args, **kwargs):
        self.name = name

    def calculate(self, sample_list, model_output, *args, **kwargs):
        """Abstract method to be implemented by the child class. Takes
        in a ``SampleList`` and a dict returned by model as output and
        returns back a float tensor/number indicating value for this metric.

        Args:
            sample_list (SampleList): SampleList provided by the dataloader for the
                                      current iteration.
            model_output (Dict): Output dict from the model for the current
                                 SampleList

        Returns:
            torch.Tensor|float: Value of the metric.
        """
        # Override in your child class
        raise NotImplementedError(
            "'calculate' must be implemented in the child class"
        )

    def __call__(self, *args, **kwargs):
        # Calling the metric object directly is equivalent to calculate().
        return self.calculate(*args, **kwargs)

    def _calculate_with_checks(self, *args, **kwargs):
        # Hook point for validation wrappers; currently a plain delegate.
        return self.calculate(*args, **kwargs)
@registry.register_metric("accuracy")
class Accuracy(BaseMetric):
    """Metric for calculating accuracy.

    **Key:** ``accuracy``
    """

    def __init__(self):
        super().__init__("accuracy")

    def calculate(self, sample_list, model_output, *args, **kwargs):
        """Compute plain classification accuracy for the batch.

        Args:
            sample_list (SampleList): SampleList provided by DataLoader for
                                      current iteration
            model_output (Dict): Dict returned by model.

        Returns:
            torch.FloatTensor: accuracy.
        """
        predictions = torch.max(model_output["scores"], 1)[1]
        targets = sample_list["targets"]
        num_correct = (targets == predictions.squeeze()).sum()
        batch_size = len(targets)
        return num_correct / batch_size
@registry.register_metric("caption_bleu4")
class CaptionBleu4Metric(BaseMetric):
    """Metric for calculating caption accuracy using BLEU4 Score.
    **Key:** ``caption_bleu4``
    """
    # NOTE(review): class-level import binds the nltk module as the class
    # attribute ``bleu_score``, accessed below via ``self.bleu_score``.
    import nltk.translate.bleu_score as bleu_score
    def __init__(self):
        super().__init__("caption_bleu4")
        # Processor registered by the COCO captioning pipeline; presumably
        # maps a caption (string or token ids) to {"tokens": [...]} -- TODO
        # confirm against the dataset builder.
        self.caption_processor = registry.get("coco_caption_processor")
    def calculate(self, sample_list, model_output, *args, **kwargs):
        """Calculate accuracy and return it back.
        Args:
            sample_list (SampleList): SampleList provided by DataLoader for
                                current iteration
            model_output (Dict): Dict returned by model.
        Returns:
            torch.FloatTensor: bleu4 score.
        """
        # Create reference and hypotheses captions.
        references = []
        hypotheses = []
        # References: one list of tokenized ground-truth captions per sample.
        # Assumes sample_list.answers rows support .tolist() -- TODO confirm.
        targets = sample_list.answers
        for j, p in enumerate(targets):
            img_captions = [
                self.caption_processor(c)["tokens"] for c in targets[j].tolist()
            ]
            references.append(img_captions)
        # Hypotheses: greedy (argmax over the last dim) decoding of scores.
        scores = torch.max(model_output["scores"], dim=-1)[1]
        scores = scores.tolist()
        predictions = []
        for j, p in enumerate(scores):
            caption = self.caption_processor(scores[j])["tokens"]
            predictions.append(caption)
        hypotheses.extend(predictions)
        assert len(references) == len(hypotheses)
        # Corpus-level BLEU-4 over the whole batch.
        bleu4 = self.bleu_score.corpus_bleu(references, hypotheses)
        return targets.new_tensor(bleu4, dtype=torch.float)
@registry.register_metric("vqa_accuracy")
class VQAAccuracy(BaseMetric):
    """
    Calculate VQAAccuracy. Find more information here_

    **Key**: ``vqa_accuracy``.

    .. _here: https://visualqa.org/evaluation.html
    """

    def __init__(self):
        super().__init__("vqa_accuracy")

    def _masked_unk_softmax(self, x, dim, mask_idx):
        # Softmax over the answer vocabulary with the column at mask_idx
        # (the UNK answer) zeroed out, then renormalized per row.
        probs = torch.nn.functional.softmax(x, dim=dim)
        probs[:, mask_idx] = 0
        row_sums = torch.sum(probs, dim=1, keepdim=True)
        return probs / row_sums

    def calculate(self, sample_list, model_output, *args, **kwargs):
        """Compute VQA accuracy for the batch and return it.

        Args:
            sample_list (SampleList): SampleList provided by DataLoader for
                                      current iteration
            model_output (Dict): Dict returned by model.

        Returns:
            torch.FloatTensor: VQA Accuracy
        """
        logits = model_output["scores"]
        targets = sample_list["targets"]
        masked_probs = self._masked_unk_softmax(logits, 1, 0)
        predictions = masked_probs.argmax(dim=1)
        # One-hot encode the predictions and score them against the soft
        # targets, averaging over the batch.
        one_hots = targets.new_zeros(*targets.size())
        one_hots.scatter_(1, predictions.view(-1, 1), 1)
        matched = one_hots * targets
        return torch.sum(matched) / targets.size(0)
class RecallAtK(BaseMetric):
    """Base class for Recall@K style ranking metrics.

    Ranks the candidate scores for every sample, extracts the rank assigned
    to the ground-truth candidate and lets subclasses derive metrics from it.

    Args:
        name (str): Name of the metric (defaults to ``recall@k``).
    """

    def __init__(self, name="recall@k"):
        super().__init__(name)

    def score_to_ranks(self, scores):
        """Convert a (batch, num_candidates) score matrix to 1-based ranks."""
        # sort in descending order - largest score gets highest rank
        sorted_ranks, ranked_idx = scores.sort(1, descending=True)
        # convert from ranked_idx to ranks
        ranks = ranked_idx.clone().fill_(0)
        for i in range(ranked_idx.size(0)):
            # Bug fix: the inner loop was hard-coded to 100 candidates, which
            # crashes (or silently mis-ranks) for any other candidate count;
            # iterate over the actual candidate dimension instead.
            for j in range(ranked_idx.size(1)):
                ranks[i][ranked_idx[i][j]] = j
        ranks += 1
        return ranks

    def get_gt_ranks(self, ranks, ans_ind):
        """Pick out the rank of the ground-truth answer for each sample."""
        _, ans_ind = ans_ind.max(dim=1)
        ans_ind = ans_ind.view(-1)
        gt_ranks = torch.LongTensor(ans_ind.size(0))
        for i in range(ans_ind.size(0)):
            gt_ranks[i] = int(ranks[i, ans_ind[i].long()])
        return gt_ranks

    def get_ranks(self, sample_list, model_output, *args, **kwargs):
        """Return the processed ground-truth ranks as a float tensor."""
        output = model_output["scores"]
        expected = sample_list["targets"]
        ranks = self.score_to_ranks(output)
        gt_ranks = self.get_gt_ranks(ranks, expected)
        # NOTE(review): ``process_ranks`` is not defined in this class or in
        # any visible subclass -- presumably provided by a mixin or subclass
        # elsewhere; confirm before relying on this code path.
        ranks = self.process_ranks(gt_ranks)
        return ranks.float()

    def calculate(self, sample_list, model_output, k, *args, **kwargs):
        """Fraction of samples whose ground-truth rank is <= ``k``."""
        ranks = self.get_ranks(sample_list, model_output)
        recall = float(torch.sum(torch.le(ranks, k))) / ranks.size(0)
        return recall
@registry.register_metric("r@1")
class RecallAt1(RecallAtK):
    """
    Calculate Recall@1 which specifies how many time the chosen candidate
    was rank 1.

    **Key**: ``r@1``.
    """

    def __init__(self):
        super().__init__("r@1")

    def calculate(self, sample_list, model_output, *args, **kwargs):
        """Calculate Recall@1 and return it back.

        Args:
            sample_list (SampleList): SampleList provided by DataLoader for
                                      current iteration
            model_output (Dict): Dict returned by model.

        Returns:
            torch.FloatTensor: Recall@1
        """
        # Bug fix: ``self.calculate(...)`` recursed into this same override
        # forever; delegate to the parent implementation with k fixed to 1.
        return super().calculate(sample_list, model_output, k=1)
@registry.register_metric("r@5")
class RecallAt5(RecallAtK):
    """
    Calculate Recall@5 which specifies how many time the chosen candidate
    was among first 5 rank.

    **Key**: ``r@5``.
    """

    def __init__(self):
        super().__init__("r@5")

    def calculate(self, sample_list, model_output, *args, **kwargs):
        """Calculate Recall@5 and return it back.

        Args:
            sample_list (SampleList): SampleList provided by DataLoader for
                                      current iteration
            model_output (Dict): Dict returned by model.

        Returns:
            torch.FloatTensor: Recall@5
        """
        # Bug fix: ``self.calculate(...)`` recursed into this same override
        # forever; delegate to the parent implementation with k fixed to 5.
        return super().calculate(sample_list, model_output, k=5)
@registry.register_metric("r@10")
class RecallAt10(RecallAtK):
    """
    Calculate Recall@10 which specifies how many time the chosen candidate
    was among first 10 ranks.

    **Key**: ``r@10``.
    """

    def __init__(self):
        super().__init__("r@10")

    def calculate(self, sample_list, model_output, *args, **kwargs):
        """Calculate Recall@10 and return it back.

        Args:
            sample_list (SampleList): SampleList provided by DataLoader for
                                      current iteration
            model_output (Dict): Dict returned by model.

        Returns:
            torch.FloatTensor: Recall@10
        """
        # Bug fix: ``self.calculate(...)`` recursed into this same override
        # forever; delegate to the parent implementation with k fixed to 10.
        return super().calculate(sample_list, model_output, k=10)
@registry.register_metric("mean_r")
class MeanRank(RecallAtK):
    """
    Calculate MeanRank which specifies what was the average rank of the chosen
    candidate.

    **Key**: ``mean_r``.
    """

    def __init__(self):
        super().__init__("mean_r")

    def calculate(self, sample_list, model_output, *args, **kwargs):
        """Compute the mean ground-truth rank over the batch.

        Args:
            sample_list (SampleList): SampleList provided by DataLoader for
                                      current iteration
            model_output (Dict): Dict returned by model.

        Returns:
            torch.FloatTensor: mean rank
        """
        gt_ranks = self.get_ranks(sample_list, model_output)
        return gt_ranks.mean()
@registry.register_metric("mean_rr")
class MeanReciprocalRank(RecallAtK):
    """
    Calculate reciprocal of mean rank..

    **Key**: ``mean_rr``.
    """

    def __init__(self):
        super().__init__("mean_rr")

    def calculate(self, sample_list, model_output, *args, **kwargs):
        """Compute the mean of the reciprocal ground-truth ranks.

        Args:
            sample_list (SampleList): SampleList provided by DataLoader for
                                      current iteration
            model_output (Dict): Dict returned by model.

        Returns:
            torch.FloatTensor: Mean Reciprocal Rank
        """
        gt_ranks = self.get_ranks(sample_list, model_output)
        return gt_ranks.reciprocal().mean()
|
import unittest
from federatedml.callbacks.validation_strategy import ValidationStrategy
import numpy as np
from federatedml.util import consts
from federatedml.param.evaluation_param import EvaluateParam
class TestValidationStrategy(unittest.TestCase):
def setUp(self) -> None:
self.role = 'guest'
self.mode = 'hetero'
self.early_stopping_round = 1
self.use_first_metric_only = False
@staticmethod
def generate_fake_eval_metrics(total_rounds, decrease_round, metrics=['ks', 'auc'], start_val=0.8):
assert total_rounds >= decrease_round
eval_result_list = []
start_decrease_round = total_rounds - decrease_round
for i in range(total_rounds):
if i < start_decrease_round:
start_val += 0.01
else:
start_val -= 0.01
eval_dict = {metric: start_val for metric in metrics}
eval_result_list.append(eval_dict)
return eval_result_list
def test_early_stopping(self):
test_rounds = [i for i in range(10, 100)]
decrease_rounds = [np.random.randint(i) for i in test_rounds]
for test_round, decrease_round in zip(test_rounds, decrease_rounds):
eval_dicts = self.generate_fake_eval_metrics(test_round, decrease_round, )
self.early_stopping_round = decrease_round - 1
if self.early_stopping_round <= 0:
continue
validation_strategy = ValidationStrategy(
self.role,
self.mode,
early_stopping_rounds=self.early_stopping_round,
use_first_metric_only=self.use_first_metric_only)
for idx, eval_res in enumerate(eval_dicts):
validation_strategy.performance_recorder.update(eval_res)
check_rs = validation_strategy.check_early_stopping()
if check_rs:
self.assertTrue((test_round - decrease_round + self.early_stopping_round - 1) == idx)
print('test checking passed')
break
def test_use_first_metric_only(self):
def evaluate(param, early_stopping_rounds, use_first_metric_only):
eval_type = param.eval_type
metric_list = param.metrics
first_metric = None
if early_stopping_rounds and use_first_metric_only and len(metric_list) != 0:
single_metric_list = None
if eval_type == consts.BINARY:
single_metric_list = consts.BINARY_SINGLE_VALUE_METRIC
elif eval_type == consts.REGRESSION:
single_metric_list = consts.REGRESSION_SINGLE_VALUE_METRICS
elif eval_type == consts.MULTY:
single_metric_list = consts.MULTI_SINGLE_VALUE_METRIC
for metric in metric_list:
if metric in single_metric_list:
first_metric = metric
break
return first_metric
param_0 = EvaluateParam(metrics=['roc', 'lift', 'ks', 'auc', 'gain'], eval_type='binary')
param_1 = EvaluateParam(metrics=['acc', 'precision', 'auc'], eval_type='binary')
param_2 = EvaluateParam(metrics=['acc', 'precision', 'gain', 'recall', 'lift'], eval_type='binary')
param_3 = EvaluateParam(metrics=['acc', 'precision', 'gain', 'auc', 'recall'], eval_type='multi')
print(evaluate(param_0, 10, True))
print(evaluate(param_1, 10, True))
print(evaluate(param_2, 10, True))
print(evaluate(param_3, 10, True))
def test_best_iter(self):
test_rounds = [i for i in range(10, 100)]
decrease_rounds = [np.random.randint(i) for i in test_rounds]
for test_round, decrease_round in zip(test_rounds, decrease_rounds):
eval_dicts = self.generate_fake_eval_metrics(test_round, decrease_round, )
self.early_stopping_round = decrease_round - 1
if self.early_stopping_round <= 0:
continue
validation_strategy = ValidationStrategy(self.role, self.mode,
early_stopping_rounds=self.early_stopping_round,
use_first_metric_only=self.use_first_metric_only)
for idx, eval_res in enumerate(eval_dicts):
validation_strategy.performance_recorder.update(eval_res)
check_rs = validation_strategy.check_early_stopping()
if check_rs:
best_perform = validation_strategy.performance_recorder.cur_best_performance
self.assertDictEqual(best_perform, eval_dicts[test_round - decrease_round - 1])
print('best iter checking passed')
break
def test_homo_checking(self):
try:
validation_strategy = ValidationStrategy(self.role, mode='homo',
early_stopping_rounds=1)
except Exception as e:
# throwing an error is expected
print(e)
print('error detected {}, homo checking passed'.format(e))
if __name__ == '__main__':
    # Manual test driver: only the first-metric selection test is enabled by
    # default; the remaining cases are kept commented for ad-hoc runs.
    tvs = TestValidationStrategy()
    tvs.setUp()
    tvs.test_use_first_metric_only()
    # tvs.test_early_stopping()
    # tvs.test_best_iter()
    # tvs.test_homo_checking() # expect checking error !!!
|
"""
Handlers for IPythonDirective's @doctest pseudo-decorator.
The Sphinx extension that provides support for embedded IPython code provides
a pseudo-decorator @doctest, which treats the input/output block as a
doctest, raising a RuntimeError during doc generation if the actual output
(after running the input) does not match the expected output.
An example usage is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
@doctest
In [2]: x + 2
Out[3]: 3
One can also provide arguments to the decorator. The first argument should be
the name of a custom handler. The specification of any other arguments is
determined by the handler. For example,
.. code-block:: rst
.. ipython::
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
allows the actual output ``0.30000000000000004`` to match the expected output
due to a comparison with `np.allclose`.
This module contains handlers for the @doctest pseudo-decorator. Handlers
should have the following function signature::
handler(sphinx_shell, args, input_lines, found, submitted)
where `sphinx_shell` is the embedded Sphinx shell, `args` contains the list
of arguments that follow: '@doctest handler_name', `input_lines` contains
a list of the lines relevant to the current doctest, `found` is a string
containing the output from the IPython shell, and `submitted` is a string
containing the expected output from the IPython shell.
Handlers must be registered in the `doctests` dict at the end of this module.
"""
def str_to_array(s):
    """
    Simplistic converter of strings from repr to float NumPy arrays.

    If the repr representation has ellipsis in it, then this will fail.

    Parameters
    ----------
    s : str
        The repr version of a NumPy array.

    Examples
    --------
    >>> s = "array([ 0.3, inf, nan])"
    >>> a = str_to_array(s)
    """
    import numpy as np

    # eval() below must be able to resolve the inf/nan tokens that appear in
    # NumPy reprs (assumes default printoptions). Note that this eval()s
    # doctest output, so it must only ever see trusted strings.
    from numpy import inf, nan

    # Strip the leading "array(" and trailing ")" when present.
    body = s[6:-1] if s.startswith(u'array') else s
    if body.startswith(u'['):
        return np.array(eval(body), dtype=float)
    # A bare scalar repr: force 1D so callers can index into the result.
    return np.atleast_1d(float(body))
def float_doctest(sphinx_shell, args, input_lines, found, submitted):
    """
    Doctest which allow the submitted output to vary slightly from the input.

    Here is how it might appear in an rst file:

    .. code-block:: rst

       .. ipython::

          @doctest float
          In [1]: 0.1 + 0.2
          Out[1]: 0.3

    Optional third and fourth arguments are the relative and absolute
    tolerances for ``np.allclose``; both must be given together.

    Raises
    ------
    IndexError
        If only one of rtol/atol is supplied.
    RuntimeError
        If the outputs do not match within tolerance.
    """
    import numpy as np

    if len(args) == 2:
        rtol = 1e-05
        atol = 1e-08
    else:
        # Both must be specified if any are specified.
        try:
            rtol = float(args[2])
            atol = float(args[3])
        except IndexError:
            e = ("Both `rtol` and `atol` must be specified "
                 "if either are specified: {0}".format(args))
            raise IndexError(e)

    try:
        submitted = str_to_array(submitted)
        found = str_to_array(found)
    except Exception:
        # Bug fix: this was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt. Catch only real errors -- e.g. a huge
        # array repr with ellipsis that str_to_array cannot parse.
        error = True
    else:
        # NaNs must appear at the same positions, and all remaining entries
        # must agree within tolerance.
        found_isnan = np.isnan(found)
        submitted_isnan = np.isnan(submitted)
        error = not np.allclose(found_isnan, submitted_isnan)
        error |= not np.allclose(found[~found_isnan],
                                 submitted[~submitted_isnan],
                                 rtol=rtol, atol=atol)

    TAB = ' ' * 4
    directive = sphinx_shell.directive
    if directive is None:
        source = 'Unavailable'
        content = 'Unavailable'
    else:
        source = directive.state.document.current_source
        # Add tabs and make into a single string.
        content = '\n'.join([TAB + line for line in directive.content])

    if error:
        e = ('doctest float comparison failure\n\n'
             'Document source: {0}\n\n'
             'Raw content: \n{1}\n\n'
             'On input line(s):\n{TAB}{2}\n\n'
             'we found output:\n{TAB}{3}\n\n'
             'instead of the expected:\n{TAB}{4}\n\n')
        e = e.format(source, content, '\n'.join(input_lines), repr(found),
                     repr(submitted), TAB=TAB)
        raise RuntimeError(e)
# dict of allowable doctest handlers. The key represents the first argument
# that must be given to @doctest in order to activate the handler.
# Register a new handler by adding a "name -> callable" entry; the callable
# must follow the handler signature described in the module docstring.
doctests = {
    'float': float_doctest,
}
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from typing import Optional
import pkg_resources
import torch
from detectron2.checkpoint import DetectionCheckpointer
from centermask.config import get_cfg
from detectron2.modeling import build_model
class _ModelZooUrls(object):
    """
    Mapping from names to officially released Detectron2 pre-trained models.
    """

    # Base URL for the released checkpoints (Dropbox direct-download links);
    # note it already ends with a '/'.
    S3_PREFIX = "https://dl.dropbox.com/s/"

    # format: {config_path.yaml} -> file_id/checkpoint_name.pth
    CONFIG_PATH_TO_URL_SUFFIX = {
        "centermask/centermask_lite_V_39_eSE_FPN_ms_4x.yaml": "uwc0ypa1jvco2bi/centermask2-lite-V-39-eSE-FPN-ms-4x.pth",
        # Bug fix: this key previously contained a trailing space, which made
        # the V-19 config unreachable through get_checkpoint_url().
        "centermask/centermask_lite_V_19_eSE_FPN_ms_4x.yaml": "dret2ap7djty7mp/centermask2-lite-V-19-eSE-FPN-ms-4x.pth",
        "centermask/centermask_V_39_eSE_FPN_ms_3x.yaml": "tczecsdxt10uai5/centermask2-V-39-eSE-FPN-ms-3x.pth",
    }


def get_checkpoint_url(config_path):
    """
    Returns the URL to the model trained using the given config

    Args:
        config_path (str): config file name relative to centermask's "configs/"
            directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"

    Returns:
        str: a URL to the model

    Raises:
        RuntimeError: if the config has no released checkpoint.
    """
    name = config_path.replace(".yaml", "")
    if config_path in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX:
        suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[config_path]
        # Bug fix: S3_PREFIX already ends with '/'; the previous
        # '+ "/" +' produced a double slash in the URL path.
        return _ModelZooUrls.S3_PREFIX + suffix
    raise RuntimeError("{} not available in Model Zoo!".format(name))
def get_config_file(config_path):
    """
    Return the real filesystem path of a builtin config file.

    Args:
        config_path (str): config file name relative to centermask's "configs/"
            directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"

    Returns:
        str: the real path to the config file.

    Raises:
        RuntimeError: if the config file is not shipped with the package.
    """
    resource = os.path.join("configs", config_path)
    cfg_file = pkg_resources.resource_filename("centermask.model_zoo", resource)
    if not os.path.exists(cfg_file):
        raise RuntimeError("{} not available in Model Zoo!".format(config_path))
    return cfg_file
|
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""Tests on internal helper functions"""
import unittest
import sys
import decimal
import xml.etree.ElementTree as ElementTree
from collections import OrderedDict
from xmlschema import XMLSchema, XMLSchemaParseError
from xmlschema.etree import etree_element, prune_etree
from xmlschema.qnames import XSI_TYPE, XSD_SCHEMA, XSD_ELEMENT, XSD_SIMPLE_TYPE, \
XSD_ANNOTATION, get_namespace, get_qname, local_name, qname_to_prefixed
from xmlschema.namespaces import XSD_NAMESPACE, XSI_NAMESPACE
from xmlschema.helpers import get_xsd_annotation, get_xsd_derivation_attribute, \
get_xsd_form_attribute, raw_xml_encode, count_digits, strictly_equal, \
iter_nested_items, ParticleCounter
class TestHelpers(unittest.TestCase):
    """Tests for the qualified-name and XSD helper functions of xmlschema."""
    @classmethod
    def setUpClass(cls):
        # Build the XSD meta-schema once; some helpers are exercised against
        # its components.
        XMLSchema.meta_schema.build()
    @classmethod
    def tearDownClass(cls):
        XMLSchema.meta_schema.clear()
    def test_get_namespace_function(self):
        # get_namespace() extracts the '{...}' namespace part of an extended
        # QName; empty/None inputs map to ''.
        self.assertEqual(get_namespace(XSD_SIMPLE_TYPE), XSD_NAMESPACE)
        self.assertEqual(get_namespace(''), '')
        self.assertEqual(get_namespace(None), '')
        self.assertEqual(get_namespace('{}name'), '')
        self.assertEqual(get_namespace('{ }name'), ' ')
        self.assertEqual(get_namespace('{ ns }name'), ' ns ')
    def test_get_qname_functions(self):
        # get_qname() joins a namespace and a local name; falsy local names
        # and falsy namespaces pass through unchanged, True raises.
        self.assertEqual(get_qname(XSD_NAMESPACE, 'element'), XSD_ELEMENT)
        self.assertEqual(get_qname(XSI_NAMESPACE, 'type'), XSI_TYPE)
        self.assertEqual(get_qname(XSI_NAMESPACE, ''), '')
        self.assertEqual(get_qname(XSI_NAMESPACE, None), None)
        self.assertEqual(get_qname(XSI_NAMESPACE, 0), 0)
        self.assertEqual(get_qname(XSI_NAMESPACE, False), False)
        self.assertRaises(TypeError, get_qname, XSI_NAMESPACE, True)
        self.assertEqual(get_qname(None, True), True)
        self.assertEqual(get_qname(None, 'element'), 'element')
        self.assertEqual(get_qname(None, ''), '')
        self.assertEqual(get_qname('', 'element'), 'element')
    def test_local_name_functions(self):
        # local_name() strips the namespace part; malformed/non-string input
        # raises ValueError/TypeError respectively.
        self.assertEqual(local_name(XSD_SCHEMA), 'schema')
        self.assertEqual(local_name('schema'), 'schema')
        self.assertEqual(local_name(''), '')
        self.assertEqual(local_name(None), None)
        self.assertRaises(ValueError, local_name, '{ns name')
        self.assertRaises(TypeError, local_name, 1.0)
        self.assertRaises(TypeError, local_name, 0)
    def test_qname_to_prefixed_functions(self):
        # qname_to_prefixed() maps an extended QName to prefixed form using a
        # prefix->namespace mapping; use_empty controls the '' prefix usage.
        namespaces = {'xs': XSD_NAMESPACE, 'xsi': XSI_NAMESPACE}
        self.assertEqual(qname_to_prefixed(XSD_ELEMENT, namespaces), 'xs:element')
        self.assertEqual(qname_to_prefixed('xs:element', namespaces), 'xs:element')
        self.assertEqual(qname_to_prefixed('element', namespaces), 'element')
        self.assertEqual(qname_to_prefixed('', namespaces), '')
        self.assertEqual(qname_to_prefixed(None, namespaces), None)
        self.assertEqual(qname_to_prefixed(0, namespaces), 0)
        self.assertEqual(qname_to_prefixed(XSI_TYPE, {}), XSI_TYPE)
        self.assertEqual(qname_to_prefixed(None, {}), None)
        self.assertEqual(qname_to_prefixed('', {}), '')
        self.assertEqual(qname_to_prefixed('type', {'': XSI_NAMESPACE}), 'type')
        self.assertEqual(qname_to_prefixed('type', {'': ''}), 'type')
        self.assertEqual(qname_to_prefixed('{}type', {'': ''}), 'type')
        self.assertEqual(qname_to_prefixed('{}type', {'': ''}, use_empty=False), '{}type')
        # Attention! in XML the empty namespace (that means no namespace) can be
        # associated only with empty prefix, so these cases should never happen.
        self.assertEqual(qname_to_prefixed('{}type', {'p': ''}), 'p:type')
        self.assertEqual(qname_to_prefixed('type', {'p': ''}), 'type')
        self.assertEqual(qname_to_prefixed('{ns}type', {'': 'ns'}, use_empty=True), 'type')
        self.assertEqual(qname_to_prefixed('{ns}type', {'': 'ns'}, use_empty=False), '{ns}type')
        self.assertEqual(qname_to_prefixed('{ns}type', {'': 'ns', 'p': 'ns'}, use_empty=True), 'p:type')
        self.assertEqual(qname_to_prefixed('{ns}type', {'': 'ns', 'p': 'ns'}, use_empty=False), 'p:type')
        self.assertEqual(qname_to_prefixed('{ns}type', {'': 'ns', 'p': 'ns0'}, use_empty=True), 'type')
        self.assertEqual(qname_to_prefixed('{ns}type', {'': 'ns', 'p': 'ns0'}, use_empty=False), '{ns}type')
    def test_get_xsd_annotation(self):
        # get_xsd_annotation() returns the annotation child only when it is
        # the *first* child of the element.
        elem = etree_element(XSD_SCHEMA)
        self.assertIsNone(get_xsd_annotation(elem))
        elem.append(etree_element(XSD_ANNOTATION))
        self.assertEqual(get_xsd_annotation(elem), elem[0])
        elem.append(etree_element(XSD_ELEMENT))
        self.assertEqual(get_xsd_annotation(elem), elem[0])
        elem.clear()
        elem.append(etree_element(XSD_ELEMENT))
        self.assertIsNone(get_xsd_annotation(elem))
        elem.append(etree_element(XSD_ANNOTATION))
        self.assertIsNone(get_xsd_annotation(elem))
    def test_get_xsd_derivation_attribute(self):
        # '#all' expands to all allowed values; unknown tokens raise.
        elem = etree_element(XSD_ELEMENT, attrib={
            'a1': 'extension', 'a2': ' restriction', 'a3': '#all', 'a4': 'other',
            'a5': 'restriction extension restriction ', 'a6': 'other restriction'
        })
        values = ('extension', 'restriction')
        self.assertEqual(get_xsd_derivation_attribute(elem, 'a1', values), 'extension')
        self.assertEqual(get_xsd_derivation_attribute(elem, 'a2', values), ' restriction')
        self.assertEqual(get_xsd_derivation_attribute(elem, 'a3', values), 'extension restriction')
        self.assertRaises(ValueError, get_xsd_derivation_attribute, elem, 'a4', values)
        self.assertEqual(get_xsd_derivation_attribute(elem, 'a5', values), 'restriction extension restriction ')
        self.assertRaises(ValueError, get_xsd_derivation_attribute, elem, 'a6', values)
        self.assertEqual(get_xsd_derivation_attribute(elem, 'a7', values), '')
    def test_get_xsd_form_attribute(self):
        # Only exactly 'qualified'/'unqualified' are accepted; a missing
        # attribute yields None.
        elem = etree_element(XSD_ELEMENT, attrib={
            'a1': 'qualified', 'a2': ' qualified', 'a3': 'unqualified', 'a4': ''
        })
        self.assertEqual(get_xsd_form_attribute(elem, 'a1'), 'qualified')
        self.assertRaises(ValueError, get_xsd_form_attribute, elem, 'a2')
        self.assertEqual(get_xsd_form_attribute(elem, 'a3'), 'unqualified')
        self.assertRaises(ValueError, get_xsd_form_attribute, elem, 'a4')
        self.assertIsNone(get_xsd_form_attribute(elem, 'a5'))
    def test_parse_component(self):
        # _parse_child_component() must find exactly one non-annotation
        # child; multiple children raise unless strict=False.
        component = XMLSchema.meta_schema.types['anyType']
        elem = etree_element(XSD_SCHEMA)
        self.assertIsNone(component._parse_child_component(elem))
        elem.append(etree_element(XSD_ELEMENT))
        self.assertEqual(component._parse_child_component(elem), elem[0])
        elem.append(etree_element(XSD_SIMPLE_TYPE))
        self.assertRaises(XMLSchemaParseError, component._parse_child_component, elem)
        self.assertEqual(component._parse_child_component(elem, strict=False), elem[0])
        elem.clear()
        elem.append(etree_element(XSD_ANNOTATION))
        self.assertIsNone(component._parse_child_component(elem))
        elem.append(etree_element(XSD_SIMPLE_TYPE))
        self.assertEqual(component._parse_child_component(elem), elem[1])
        elem.append(etree_element(XSD_ELEMENT))
        self.assertRaises(XMLSchemaParseError, component._parse_child_component, elem)
        self.assertEqual(component._parse_child_component(elem, strict=False), elem[1])
        elem.clear()
        elem.append(etree_element(XSD_ANNOTATION))
        elem.append(etree_element(XSD_ANNOTATION))
        self.assertIsNone(component._parse_child_component(elem, strict=False))
        elem.append(etree_element(XSD_SIMPLE_TYPE))
        self.assertEqual(component._parse_child_component(elem), elem[2])
    def test_raw_xml_encode_function(self):
        # raw_xml_encode() serializes Python values to XML text; sequences
        # become space-separated lists.
        self.assertEqual(raw_xml_encode(True), 'true')
        self.assertEqual(raw_xml_encode(False), 'false')
        self.assertEqual(raw_xml_encode(10), '10')
        self.assertEqual(raw_xml_encode(0), '0')
        self.assertEqual(raw_xml_encode(1), '1')
        self.assertEqual(raw_xml_encode('alpha'), 'alpha')
        self.assertEqual(raw_xml_encode([10, 20, 30]), '10 20 30')
        self.assertEqual(raw_xml_encode((10, 20, 30)), '10 20 30')
    def test_count_digits_function(self):
        # count_digits() returns (integer_digits, fraction_digits) for
        # numbers, strings, bytes and Decimal inputs.
        self.assertEqual(count_digits(10), (2, 0))
        self.assertEqual(count_digits(-10), (2, 0))
        self.assertEqual(count_digits(081.2), (2, 1))
        self.assertEqual(count_digits(-081.200), (2, 1))
        self.assertEqual(count_digits(0.51), (0, 2))
        self.assertEqual(count_digits(-0.510), (0, 2))
        self.assertEqual(count_digits(-0.510), (0, 2))
        self.assertEqual(count_digits(decimal.Decimal('100.0')), (3, 0))
        self.assertEqual(count_digits(decimal.Decimal('100.01')), (3, 2))
        self.assertEqual(count_digits('100.01'), (3, 2))
        self.assertEqual(count_digits(1E-11), (0, 11))
        # Exponents are folded into the digit counts.
        self.assertEqual(count_digits(b'100.0E+4'), (7, 0))
        self.assertEqual(count_digits(decimal.Decimal('100.0E+4')), (7, 0))
        self.assertEqual(count_digits(decimal.Decimal('100.00001E+4')), (7, 1))
        self.assertEqual(count_digits(decimal.Decimal('0100.00E4')), (7, 0))
        self.assertEqual(count_digits(decimal.Decimal('0100.00E12')), (15, 0))
        self.assertEqual(count_digits(decimal.Decimal('0100.00E19')), (22, 0))
        self.assertEqual(count_digits(decimal.Decimal('100.0E-4')), (0, 2))
        self.assertEqual(count_digits(decimal.Decimal('0100.00E-4')), (0, 2))
        self.assertEqual(count_digits(decimal.Decimal('0100.00E-8')), (0, 6))
        self.assertEqual(count_digits(decimal.Decimal('0100.00E-9')), (0, 7))
        self.assertEqual(count_digits(decimal.Decimal('0100.00E-12')), (0, 10))
        self.assertEqual(count_digits(decimal.Decimal('100.10E-4')), (0, 5))
        self.assertEqual(count_digits(decimal.Decimal('0100.10E-12')), (0, 13))
    def test_strictly_equal_function(self):
        # strictly_equal() also compares types, unlike ==.
        self.assertTrue(strictly_equal(10, 10))
        self.assertFalse(strictly_equal(10, 10.0))
    def test_iter_nested_items_function(self):
        # iter_nested_items() flattens nested dict/list values and enforces
        # the exact container classes passed as dict_class/list_class.
        if sys.version_info >= (3, 6):
            self.assertListEqual(list(iter_nested_items({'a': 10, 'b': 20})), [10, 20])
        self.assertListEqual(list(iter_nested_items([{'a': 10, 'b': 20}, 30])), [10, 20, 30])
        with self.assertRaises(TypeError):
            list(iter_nested_items({'a': 10, 'b': 20}, dict_class=OrderedDict))
        with self.assertRaises(TypeError):
            list(iter_nested_items([10, 20], list_class=tuple))
    def test_particle_counter_class(self):
        # ParticleCounter accumulates min/max occurrence bounds; None means
        # unbounded max. NOTE(review): the asserts below imply that '+' and
        # '*' mutate the counter in place (results accumulate) -- confirm
        # against the ParticleCounter implementation.
        counter = ParticleCounter()
        self.assertEqual(repr(counter), 'ParticleCounter(0, 0)')
        other = ParticleCounter()  # Only for test isolation, usually it's a particle.
        other.min_occurs = 5
        other.max_occurs = 10
        counter += other
        self.assertEqual(repr(counter), 'ParticleCounter(5, 10)')
        counter *= other
        self.assertEqual(repr(counter), 'ParticleCounter(25, 100)')
        counter = ParticleCounter()
        counter.max_occurs = None
        self.assertEqual(repr(counter), 'ParticleCounter(0, None)')
        self.assertEqual(repr(counter * other), 'ParticleCounter(0, None)')
        self.assertEqual(repr(counter + other), 'ParticleCounter(5, None)')
        self.assertEqual(repr(counter * other), 'ParticleCounter(25, None)')
        counter.reset()
        self.assertEqual(repr(counter), 'ParticleCounter(0, 0)')
        counter.max_occurs = None
        other.min_occurs = other.max_occurs = 0
        self.assertEqual(repr(counter * other), 'ParticleCounter(0, 0)')
        counter.reset()
        other.min_occurs = 0
        other.max_occurs = None
        self.assertEqual(repr(counter * other), 'ParticleCounter(0, 0)')
        self.assertEqual(repr(counter + other), 'ParticleCounter(0, None)')
        self.assertEqual(repr(counter + other), 'ParticleCounter(0, None)')
        counter.max_occurs = 1
        self.assertEqual(repr(counter * other), 'ParticleCounter(0, None)')
class TestElementTreeHelpers(unittest.TestCase):
    """Tests for the ElementTree helpers (currently only prune_etree)."""
    def test_prune_etree_function(self):
        # prune_etree() removes the subtrees selected by the callable; it
        # returns True when the root itself matches (root is then emptied,
        # but its attributes are preserved).
        root = ElementTree.XML('<A id="0"><B/><C/><D/></A>')
        self.assertFalse(prune_etree(root, lambda x: x.tag == 'C'))
        self.assertListEqual([e.tag for e in root.iter()], ['A', 'B', 'D'])
        self.assertEqual(root.attrib, {'id': '0'})
        root = ElementTree.XML('<A id="1"><B/><C/><D/></A>')
        self.assertTrue(prune_etree(root, lambda x: x.tag != 'C'))
        self.assertListEqual([e.tag for e in root.iter()], ['A'])
        self.assertEqual(root.attrib, {'id': '1'})
        # Selectors may also be bound methods or classmethods, not only
        # plain lambdas.
        class SelectorClass:
            tag = 'C'
            @classmethod
            def class_method(cls, elem):
                return elem.tag == cls.tag
            def method(self, elem):
                return elem.tag != self.tag
        selector = SelectorClass()
        root = ElementTree.XML('<A id="0"><B/><C/><D/></A>')
        self.assertFalse(prune_etree(root, selector.class_method))
        self.assertListEqual([e.tag for e in root.iter()], ['A', 'B', 'D'])
        self.assertEqual(root.attrib, {'id': '0'})
        root = ElementTree.XML('<A id="1"><B/><C/><D/></A>')
        self.assertTrue(prune_etree(root, selector.method))
        self.assertListEqual([e.tag for e in root.iter()], ['A'])
        self.assertEqual(root.attrib, {'id': '1'})
if __name__ == '__main__':
    # Print the environment header used by the xmlschema test suite, then
    # delegate to unittest's CLI runner.
    from xmlschema.testing import print_test_header
    print_test_header()
    unittest.main()
|
from hubcheck.pageobjects.basepagewidget import BasePageWidget
from hubcheck.pageobjects.basepageelement import TextReadOnly
import re
class TicketListSearchForm(BasePageWidget):
    """Page object wrapping the support-ticket list search form.

    Exposes filtering, per-row access and pagination of the ticket list.
    """

    def __init__(self, owner, locatordict=None):
        # Bug fix: the default was a mutable dict ({}) shared between every
        # instance created without an explicit locatordict; use None and
        # build a fresh dict per instance.
        if locatordict is None:
            locatordict = {}
        super(TicketListSearchForm, self).__init__(owner, locatordict)

        # load hub's classes
        TicketListSearchForm_Locators = self.load_class('TicketListSearchForm_Locators')
        TicketListFilterOptions = self.load_class('TicketListFilterOptions')
        self.TicketListSearchResultRow = self.load_class('TicketListSearchResultRow')
        ListPageNav = self.load_class('ListPageNav')

        # update this object's locator
        self.locators.update(TicketListSearchForm_Locators.locators)

        # update the locators with those from the owner
        self.update_locators_from_owner()

        # setup page object's components
        self.rowlocid = 0  # counter used to mint unique row-locator keys
        self.filteroptions = TicketListFilterOptions(self, {'base': 'filteroptions'})
        self.footer = ListPageNav(self, {'base': 'footer'})

        # update the component's locators with this objects overrides
        self._updateLocators()

    def _checkLocatorsNonAdmin(self, widgets=None, cltype='NonAdmin'):
        """Validate the locators visible to non-admin users (plus one row if any)."""
        if self.ticket_rows_displayed() > 0:
            row = self.ticket_row_by_index(1)
            row._checkLocatorsNonAdmin()
        self._checkLocators(widgets, cltype)

    def _checkLocatorsAdmin(self, widgets=None, cltype='Admin'):
        """Validate the locators visible to admin users (plus one row if any)."""
        if self.ticket_rows_displayed() > 0:
            row = self.ticket_row_by_index(1)
            row._checkLocatorsAdmin()
        self._checkLocators(widgets, cltype)

    def filter_by_keyword(self, value):
        """Filter the ticket list by a free-text keyword."""
        return self.filteroptions.filter_by_keyword(value)

    def filter_by_dropdown(self, value):
        """Filter the ticket list using the dropdown filter options."""
        return self.filteroptions.filter_by_dropdown(value)

    def ticket_rows_displayed(self):
        """Return the number of ticket rows currently rendered in the table."""
        return len(self.find_elements(self.locators['row']))

    def ticket_row_by_index(self, index):
        """Return the row page object at 1-based position ``index``.

        Raises:
            ValueError: if the table is empty or ``index`` is out of range.
        """
        maxindex = self.ticket_rows_displayed()
        if maxindex == 0:
            raise ValueError("no rows available in table")
        if index < 1 or index > maxindex:
            err = "provided row index (%d) outside of available range (1-%d)"
            raise ValueError(err % (index, maxindex))
        # store a new row locator under a unique key; note the locators dict
        # grows by one entry per lookup for the life of this object.
        rowlocator = self.locators['rowindex'].format(row_num=index)
        self.rowlocid += 1
        locid = "rowbyindex%d" % self.rowlocid
        self.locators[locid] = rowlocator
        # return the row
        return self.TicketListSearchResultRow(self, {'base': locid})

    def goto_page_number(self, pagenumber):
        """Jump to an absolute page of the ticket list."""
        return self.footer.goto_page_number(pagenumber)

    def goto_page_relative(self, relation):
        """Navigate relative to the current page (e.g. next/previous)."""
        return self.footer.goto_page_relative(relation)

    def get_pagination_counts(self):
        """Return the pagination counts reported in the list footer."""
        return self.footer.get_pagination_counts()

    def display_limit(self, limit=None):
        """Get or set the number of rows displayed per page."""
        return self.footer.display_limit(limit)
class TicketListSearchForm_Locators_Base(object):
    """Base CSS locators for the TicketListSearchForm page object."""

    # Maps logical widget names to CSS selectors. The 'rowindex' entry is
    # a template: locators['rowindex'].format(row_num=N) selects row N.
    locators = {
        'base': "css=#main form",
        'filteroptions': "css=.filters",
        'footer': "css=.list-footer",
        'row': "css=#tktlist tbody tr",
        'rowindex': "css=#tktlist tbody tr:nth-of-type({row_num})",
    }
|
# Generated by Django 3.0.7 on 2020-06-12 12:06
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration: adds a 'publish' timestamp to the Reference
    # model and sets default ordering to newest-publish-first.

    dependencies = [
        # Applies on top of the initial schema of the 'reference' app.
        ('reference', '0001_initial'),
    ]

    operations = [
        # Order Reference querysets by the publish field, descending.
        migrations.AlterModelOptions(
            name='reference',
            options={'ordering': ('-publish',)},
        ),
        # New datetime field; the default is the timezone.now callable,
        # which Django evaluates when a row needs a value (including
        # backfilling existing rows when this migration runs).
        migrations.AddField(
            model_name='reference',
            name='publish',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.